]> git.proxmox.com Git - qemu.git/blame - exec.c
Fix wrong region_offset when overlaying a page with another
[qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15
AK
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
67
68/* make various TB consistency checks */
5fafdf24
TS
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4
PB
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
f8e2af11
SW
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
d03d860b
BS
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
e2eef170 121#endif
9fa3e853 122
6a00d601
FB
123CPUState *first_cpu;
124/* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
b3c4bbe5 126DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 127/* 0 = Do not count executed instructions.
bf20dc07 128 1 = Precise instruction counting.
2e70f6ef
PB
129 2 = Adaptive rate instruction counting. */
130int use_icount = 0;
6a00d601 131
54936004 132typedef struct PageDesc {
92e873b9 133 /* list of TBs intersecting this ram page */
fd6ce8f6 134 TranslationBlock *first_tb;
9fa3e853
FB
135 /* in order to optimize self modifying code, we count the number
136 of lookups we do to a given page to use a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139#if defined(CONFIG_USER_ONLY)
140 unsigned long flags;
141#endif
54936004
FB
142} PageDesc;
143
41c1b1c9 144/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
145 while in user mode we want it to be based on virtual addresses. */
146#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
147#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 151#endif
bedb69ea 152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 154#endif
54936004 155
5cd2c5b6
RH
156/* Size of the L2 (and L3, etc) page tables. */
157#define L2_BITS 10
54936004
FB
158#define L2_SIZE (1 << L2_BITS)
159
5cd2c5b6
RH
160/* The bits remaining after N lower levels of page tables. */
161#define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163#define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165
166/* Size of the L1 page table. Avoid silly small sizes. */
167#if P_L1_BITS_REM < 4
168#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
169#else
170#define P_L1_BITS P_L1_BITS_REM
171#endif
172
173#if V_L1_BITS_REM < 4
174#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
175#else
176#define V_L1_BITS V_L1_BITS_REM
177#endif
178
179#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181
182#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
184
83fb7adf 185unsigned long qemu_real_host_page_size;
83fb7adf
FB
186unsigned long qemu_host_page_size;
187unsigned long qemu_host_page_mask;
54936004 188
5cd2c5b6
RH
189/* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191static void *l1_map[V_L1_SIZE];
54936004 192
e2eef170 193#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
194typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198} PhysPageDesc;
199
5cd2c5b6
RH
200/* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202static void *l1_phys_map[P_L1_SIZE];
6d9a1304 203
e2eef170 204static void io_mem_init(void);
62152b8a 205static void memory_map_init(void);
e2eef170 206
33417e70 207/* io memory support */
acbbec5d
AK
208CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
209CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 210void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 211static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
212static int io_mem_watch;
213#endif
33417e70 214
34865134 215/* log support */
1e8b27ca
JR
216#ifdef WIN32
217static const char *logfilename = "qemu.log";
218#else
d9b630fd 219static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 220#endif
34865134
FB
221FILE *logfile;
222int loglevel;
e735b91c 223static int log_append = 0;
34865134 224
e3db7226 225/* statistics */
b3755a91 226#if !defined(CONFIG_USER_ONLY)
e3db7226 227static int tlb_flush_count;
b3755a91 228#endif
e3db7226
FB
229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231
7cb69cae
FB
232#ifdef _WIN32
233static void map_exec(void *addr, long size)
234{
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239}
240#else
241static void map_exec(void *addr, long size)
242{
4369415f 243 unsigned long start, end, page_size;
7cb69cae 244
4369415f 245 page_size = getpagesize();
7cb69cae 246 start = (unsigned long)addr;
4369415f 247 start &= ~(page_size - 1);
7cb69cae
FB
248
249 end = (unsigned long)addr + size;
4369415f
FB
250 end += page_size - 1;
251 end &= ~(page_size - 1);
7cb69cae
FB
252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255}
256#endif
257
b346ff46 258static void page_init(void)
54936004 259{
83fb7adf 260 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 261 TARGET_PAGE_SIZE */
c2b48b69
AL
262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
83fb7adf
FB
272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 277
2e9a5713 278#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 279 {
f01576f1
JL
280#ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
fd436907 297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
298 } else {
299#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
fd436907 301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
302#endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309#else
50a9569b 310 FILE *f;
50a9569b 311
0776590d 312 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 313
fd436907 314 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 315 if (f) {
5cd2c5b6
RH
316 mmap_lock();
317
50a9569b 318 do {
5cd2c5b6
RH
319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
333 }
334 } while (!feof(f));
5cd2c5b6 335
50a9569b 336 fclose(f);
5cd2c5b6 337 mmap_unlock();
50a9569b 338 }
f01576f1 339#endif
50a9569b
AZ
340 }
341#endif
54936004
FB
342}
343
41c1b1c9 344static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 345{
41c1b1c9
PB
346 PageDesc *pd;
347 void **lp;
348 int i;
349
5cd2c5b6 350#if defined(CONFIG_USER_ONLY)
7267c094 351 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
352# define ALLOC(P, SIZE) \
353 do { \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
356 } while (0)
357#else
358# define ALLOC(P, SIZE) \
7267c094 359 do { P = g_malloc0(SIZE); } while (0)
17e2377a 360#endif
434929bf 361
5cd2c5b6
RH
362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
364
365 /* Level 2..N-1. */
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
367 void **p = *lp;
368
369 if (p == NULL) {
370 if (!alloc) {
371 return NULL;
372 }
373 ALLOC(p, sizeof(void *) * L2_SIZE);
374 *lp = p;
17e2377a 375 }
5cd2c5b6
RH
376
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
378 }
379
380 pd = *lp;
381 if (pd == NULL) {
382 if (!alloc) {
383 return NULL;
384 }
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
386 *lp = pd;
54936004 387 }
5cd2c5b6
RH
388
389#undef ALLOC
5cd2c5b6
RH
390
391 return pd + (index & (L2_SIZE - 1));
54936004
FB
392}
393
41c1b1c9 394static inline PageDesc *page_find(tb_page_addr_t index)
54936004 395{
5cd2c5b6 396 return page_find_alloc(index, 0);
fd6ce8f6
FB
397}
398
6d9a1304 399#if !defined(CONFIG_USER_ONLY)
c227f099 400static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 401{
e3f4e2a4 402 PhysPageDesc *pd;
5cd2c5b6
RH
403 void **lp;
404 int i;
92e873b9 405
5cd2c5b6
RH
406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 408
5cd2c5b6
RH
409 /* Level 2..N-1. */
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
411 void **p = *lp;
412 if (p == NULL) {
413 if (!alloc) {
414 return NULL;
415 }
7267c094 416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
417 }
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 419 }
5cd2c5b6 420
e3f4e2a4 421 pd = *lp;
5cd2c5b6 422 if (pd == NULL) {
e3f4e2a4 423 int i;
5ab97b7f 424 int first_index = index & ~(L2_SIZE - 1);
5cd2c5b6
RH
425
426 if (!alloc) {
108c49b8 427 return NULL;
5cd2c5b6
RH
428 }
429
7267c094 430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 431
67c4d23c 432 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6 433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
5ab97b7f 434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
67c4d23c 435 }
92e873b9 436 }
5cd2c5b6
RH
437
438 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
439}
440
f1f6e3b8 441static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
92e873b9 442{
f1f6e3b8
AK
443 PhysPageDesc *p = phys_page_find_alloc(index, 0);
444
445 if (p) {
446 return *p;
447 } else {
448 return (PhysPageDesc) {
449 .phys_offset = IO_MEM_UNASSIGNED,
450 .region_offset = index << TARGET_PAGE_BITS,
451 };
452 }
92e873b9
FB
453}
454
c227f099
AL
455static void tlb_protect_code(ram_addr_t ram_addr);
456static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 457 target_ulong vaddr);
c8a706fe
PB
458#define mmap_lock() do { } while(0)
459#define mmap_unlock() do { } while(0)
9fa3e853 460#endif
fd6ce8f6 461
4369415f
FB
462#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
463
464#if defined(CONFIG_USER_ONLY)
ccbb4d44 465/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
466 user mode. It will change when a dedicated libc will be used */
467#define USE_STATIC_CODE_GEN_BUFFER
468#endif
469
470#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
471static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
472 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
473#endif
474
8fcd3692 475static void code_gen_alloc(unsigned long tb_size)
26a5f13b 476{
4369415f
FB
477#ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481#else
26a5f13b
FB
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
4369415f 484#if defined(CONFIG_USER_ONLY)
4369415f
FB
485 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
486#else
ccbb4d44 487 /* XXX: needs adjustments */
94a6b54f 488 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 489#endif
26a5f13b
FB
490 }
491 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
492 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
493 /* The code gen buffer location may have constraints depending on
494 the host cpu and OS */
495#if defined(__linux__)
496 {
497 int flags;
141ac468
BS
498 void *start = NULL;
499
26a5f13b
FB
500 flags = MAP_PRIVATE | MAP_ANONYMOUS;
501#if defined(__x86_64__)
502 flags |= MAP_32BIT;
503 /* Cannot map more than that */
504 if (code_gen_buffer_size > (800 * 1024 * 1024))
505 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
506#elif defined(__sparc_v9__)
507 // Map the buffer below 2G, so we can use direct calls and branches
508 flags |= MAP_FIXED;
509 start = (void *) 0x60000000UL;
510 if (code_gen_buffer_size > (512 * 1024 * 1024))
511 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 512#elif defined(__arm__)
222f23f5 513 /* Keep the buffer no bigger than 16GB to branch between blocks */
1cb0661e
AZ
514 if (code_gen_buffer_size > 16 * 1024 * 1024)
515 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
516#elif defined(__s390x__)
517 /* Map the buffer so that we can use direct calls and branches. */
518 /* We have a +- 4GB range on the branches; leave some slop. */
519 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
520 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
521 }
522 start = (void *)0x90000000UL;
26a5f13b 523#endif
141ac468
BS
524 code_gen_buffer = mmap(start, code_gen_buffer_size,
525 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
526 flags, -1, 0);
527 if (code_gen_buffer == MAP_FAILED) {
528 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
529 exit(1);
530 }
531 }
cbb608a5 532#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
533 || defined(__DragonFly__) || defined(__OpenBSD__) \
534 || defined(__NetBSD__)
06e67a82
AL
535 {
536 int flags;
537 void *addr = NULL;
538 flags = MAP_PRIVATE | MAP_ANONYMOUS;
539#if defined(__x86_64__)
540 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
541 * 0x40000000 is free */
542 flags |= MAP_FIXED;
543 addr = (void *)0x40000000;
544 /* Cannot map more than that */
545 if (code_gen_buffer_size > (800 * 1024 * 1024))
546 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
547#elif defined(__sparc_v9__)
548 // Map the buffer below 2G, so we can use direct calls and branches
549 flags |= MAP_FIXED;
550 addr = (void *) 0x60000000UL;
551 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
552 code_gen_buffer_size = (512 * 1024 * 1024);
553 }
06e67a82
AL
554#endif
555 code_gen_buffer = mmap(addr, code_gen_buffer_size,
556 PROT_WRITE | PROT_READ | PROT_EXEC,
557 flags, -1, 0);
558 if (code_gen_buffer == MAP_FAILED) {
559 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
560 exit(1);
561 }
562 }
26a5f13b 563#else
7267c094 564 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
565 map_exec(code_gen_buffer, code_gen_buffer_size);
566#endif
4369415f 567#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 568 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
569 code_gen_buffer_max_size = code_gen_buffer_size -
570 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 571 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 572 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
573}
574
575/* Must be called before using the QEMU cpus. 'tb_size' is the size
576 (in bytes) allocated to the translation buffer. Zero means default
577 size. */
d5ab9713 578void tcg_exec_init(unsigned long tb_size)
26a5f13b 579{
26a5f13b
FB
580 cpu_gen_init();
581 code_gen_alloc(tb_size);
582 code_gen_ptr = code_gen_buffer;
4369415f 583 page_init();
9002ec79
RH
584#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
585 /* There's no guest base to take into account, so go ahead and
586 initialize the prologue now. */
587 tcg_prologue_init(&tcg_ctx);
588#endif
26a5f13b
FB
589}
590
d5ab9713
JK
591bool tcg_enabled(void)
592{
593 return code_gen_buffer != NULL;
594}
595
596void cpu_exec_init_all(void)
597{
598#if !defined(CONFIG_USER_ONLY)
599 memory_map_init();
600 io_mem_init();
601#endif
602}
603
9656f324
PB
604#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605
e59fb374 606static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
607{
608 CPUState *env = opaque;
9656f324 609
3098dba0
AJ
610 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
611 version_id is increased. */
612 env->interrupt_request &= ~0x01;
9656f324
PB
613 tlb_flush(env, 1);
614
615 return 0;
616}
e7f4eff7
JQ
617
618static const VMStateDescription vmstate_cpu_common = {
619 .name = "cpu_common",
620 .version_id = 1,
621 .minimum_version_id = 1,
622 .minimum_version_id_old = 1,
e7f4eff7
JQ
623 .post_load = cpu_common_post_load,
624 .fields = (VMStateField []) {
625 VMSTATE_UINT32(halted, CPUState),
626 VMSTATE_UINT32(interrupt_request, CPUState),
627 VMSTATE_END_OF_LIST()
628 }
629};
9656f324
PB
630#endif
631
950f1472
GC
632CPUState *qemu_get_cpu(int cpu)
633{
634 CPUState *env = first_cpu;
635
636 while (env) {
637 if (env->cpu_index == cpu)
638 break;
639 env = env->next_cpu;
640 }
641
642 return env;
643}
644
6a00d601 645void cpu_exec_init(CPUState *env)
fd6ce8f6 646{
6a00d601
FB
647 CPUState **penv;
648 int cpu_index;
649
c2764719
PB
650#if defined(CONFIG_USER_ONLY)
651 cpu_list_lock();
652#endif
6a00d601
FB
653 env->next_cpu = NULL;
654 penv = &first_cpu;
655 cpu_index = 0;
656 while (*penv != NULL) {
1e9fa730 657 penv = &(*penv)->next_cpu;
6a00d601
FB
658 cpu_index++;
659 }
660 env->cpu_index = cpu_index;
268a362c 661 env->numa_node = 0;
72cf2d4f
BS
662 QTAILQ_INIT(&env->breakpoints);
663 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
664#ifndef CONFIG_USER_ONLY
665 env->thread_id = qemu_get_thread_id();
666#endif
6a00d601 667 *penv = env;
c2764719
PB
668#if defined(CONFIG_USER_ONLY)
669 cpu_list_unlock();
670#endif
b3c7724c 671#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
672 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
673 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
674 cpu_save, cpu_load, env);
675#endif
fd6ce8f6
FB
676}
677
d1a1eb74
TG
678/* Allocate a new translation block. Flush the translation buffer if
679 too many translation blocks or too much generated code. */
680static TranslationBlock *tb_alloc(target_ulong pc)
681{
682 TranslationBlock *tb;
683
684 if (nb_tbs >= code_gen_max_blocks ||
685 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
686 return NULL;
687 tb = &tbs[nb_tbs++];
688 tb->pc = pc;
689 tb->cflags = 0;
690 return tb;
691}
692
693void tb_free(TranslationBlock *tb)
694{
695 /* In practice this is mostly used for single use temporary TB
696 Ignore the hard cases and just back up if this TB happens to
697 be the last one generated. */
698 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
699 code_gen_ptr = tb->tc_ptr;
700 nb_tbs--;
701 }
702}
703
9fa3e853
FB
704static inline void invalidate_page_bitmap(PageDesc *p)
705{
706 if (p->code_bitmap) {
7267c094 707 g_free(p->code_bitmap);
9fa3e853
FB
708 p->code_bitmap = NULL;
709 }
710 p->code_write_count = 0;
711}
712
5cd2c5b6
RH
713/* Set to NULL all the 'first_tb' fields in all PageDescs. */
714
715static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 716{
5cd2c5b6 717 int i;
fd6ce8f6 718
5cd2c5b6
RH
719 if (*lp == NULL) {
720 return;
721 }
722 if (level == 0) {
723 PageDesc *pd = *lp;
7296abac 724 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
725 pd[i].first_tb = NULL;
726 invalidate_page_bitmap(pd + i);
fd6ce8f6 727 }
5cd2c5b6
RH
728 } else {
729 void **pp = *lp;
7296abac 730 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
731 page_flush_tb_1 (level - 1, pp + i);
732 }
733 }
734}
735
736static void page_flush_tb(void)
737{
738 int i;
739 for (i = 0; i < V_L1_SIZE; i++) {
740 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
741 }
742}
743
744/* flush all the translation blocks */
d4e8164f 745/* XXX: tb_flush is currently not thread safe */
6a00d601 746void tb_flush(CPUState *env1)
fd6ce8f6 747{
6a00d601 748 CPUState *env;
0124311e 749#if defined(DEBUG_FLUSH)
ab3d1727
BS
750 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
751 (unsigned long)(code_gen_ptr - code_gen_buffer),
752 nb_tbs, nb_tbs > 0 ?
753 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 754#endif
26a5f13b 755 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
756 cpu_abort(env1, "Internal error: code buffer overflow\n");
757
fd6ce8f6 758 nb_tbs = 0;
3b46e624 759
6a00d601
FB
760 for(env = first_cpu; env != NULL; env = env->next_cpu) {
761 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
762 }
9fa3e853 763
8a8a608f 764 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 765 page_flush_tb();
9fa3e853 766
fd6ce8f6 767 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
768 /* XXX: flush processor icache at this point if cache flush is
769 expensive */
e3db7226 770 tb_flush_count++;
fd6ce8f6
FB
771}
772
773#ifdef DEBUG_TB_CHECK
774
bc98a7ef 775static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
776{
777 TranslationBlock *tb;
778 int i;
779 address &= TARGET_PAGE_MASK;
99773bd4
PB
780 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
781 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
782 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
783 address >= tb->pc + tb->size)) {
0bf9e31a
BS
784 printf("ERROR invalidate: address=" TARGET_FMT_lx
785 " PC=%08lx size=%04x\n",
99773bd4 786 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
787 }
788 }
789 }
790}
791
792/* verify that all the pages have correct rights for code */
793static void tb_page_check(void)
794{
795 TranslationBlock *tb;
796 int i, flags1, flags2;
3b46e624 797
99773bd4
PB
798 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
799 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
800 flags1 = page_get_flags(tb->pc);
801 flags2 = page_get_flags(tb->pc + tb->size - 1);
802 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
803 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 804 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
805 }
806 }
807 }
808}
809
810#endif
811
812/* invalidate one TB */
813static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
814 int next_offset)
815{
816 TranslationBlock *tb1;
817 for(;;) {
818 tb1 = *ptb;
819 if (tb1 == tb) {
820 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
821 break;
822 }
823 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
824 }
825}
826
9fa3e853
FB
827static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
828{
829 TranslationBlock *tb1;
830 unsigned int n1;
831
832 for(;;) {
833 tb1 = *ptb;
834 n1 = (long)tb1 & 3;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (tb1 == tb) {
837 *ptb = tb1->page_next[n1];
838 break;
839 }
840 ptb = &tb1->page_next[n1];
841 }
842}
843
d4e8164f
FB
844static inline void tb_jmp_remove(TranslationBlock *tb, int n)
845{
846 TranslationBlock *tb1, **ptb;
847 unsigned int n1;
848
849 ptb = &tb->jmp_next[n];
850 tb1 = *ptb;
851 if (tb1) {
852 /* find tb(n) in circular list */
853 for(;;) {
854 tb1 = *ptb;
855 n1 = (long)tb1 & 3;
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (n1 == n && tb1 == tb)
858 break;
859 if (n1 == 2) {
860 ptb = &tb1->jmp_first;
861 } else {
862 ptb = &tb1->jmp_next[n1];
863 }
864 }
865 /* now we can suppress tb(n) from the list */
866 *ptb = tb->jmp_next[n];
867
868 tb->jmp_next[n] = NULL;
869 }
870}
871
872/* reset the jump entry 'n' of a TB so that it is not chained to
873 another TB */
874static inline void tb_reset_jump(TranslationBlock *tb, int n)
875{
876 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
877}
878
41c1b1c9 879void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 880{
6a00d601 881 CPUState *env;
8a40a180 882 PageDesc *p;
d4e8164f 883 unsigned int h, n1;
41c1b1c9 884 tb_page_addr_t phys_pc;
8a40a180 885 TranslationBlock *tb1, *tb2;
3b46e624 886
8a40a180
FB
887 /* remove the TB from the hash list */
888 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
889 h = tb_phys_hash_func(phys_pc);
5fafdf24 890 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
891 offsetof(TranslationBlock, phys_hash_next));
892
893 /* remove the TB from the page list */
894 if (tb->page_addr[0] != page_addr) {
895 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
896 tb_page_remove(&p->first_tb, tb);
897 invalidate_page_bitmap(p);
898 }
899 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
900 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
901 tb_page_remove(&p->first_tb, tb);
902 invalidate_page_bitmap(p);
903 }
904
36bdbe54 905 tb_invalidated_flag = 1;
59817ccb 906
fd6ce8f6 907 /* remove the TB from the hash list */
8a40a180 908 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
909 for(env = first_cpu; env != NULL; env = env->next_cpu) {
910 if (env->tb_jmp_cache[h] == tb)
911 env->tb_jmp_cache[h] = NULL;
912 }
d4e8164f
FB
913
914 /* suppress this TB from the two jump lists */
915 tb_jmp_remove(tb, 0);
916 tb_jmp_remove(tb, 1);
917
918 /* suppress any remaining jumps to this TB */
919 tb1 = tb->jmp_first;
920 for(;;) {
921 n1 = (long)tb1 & 3;
922 if (n1 == 2)
923 break;
924 tb1 = (TranslationBlock *)((long)tb1 & ~3);
925 tb2 = tb1->jmp_next[n1];
926 tb_reset_jump(tb1, n1);
927 tb1->jmp_next[n1] = NULL;
928 tb1 = tb2;
929 }
930 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 931
e3db7226 932 tb_phys_invalidate_count++;
9fa3e853
FB
933}
934
935static inline void set_bits(uint8_t *tab, int start, int len)
936{
937 int end, mask, end1;
938
939 end = start + len;
940 tab += start >> 3;
941 mask = 0xff << (start & 7);
942 if ((start & ~7) == (end & ~7)) {
943 if (start < end) {
944 mask &= ~(0xff << (end & 7));
945 *tab |= mask;
946 }
947 } else {
948 *tab++ |= mask;
949 start = (start + 8) & ~7;
950 end1 = end & ~7;
951 while (start < end1) {
952 *tab++ = 0xff;
953 start += 8;
954 }
955 if (start < end) {
956 mask = ~(0xff << (end & 7));
957 *tab |= mask;
958 }
959 }
960}
961
962static void build_page_bitmap(PageDesc *p)
963{
964 int n, tb_start, tb_end;
965 TranslationBlock *tb;
3b46e624 966
7267c094 967 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
968
969 tb = p->first_tb;
970 while (tb != NULL) {
971 n = (long)tb & 3;
972 tb = (TranslationBlock *)((long)tb & ~3);
973 /* NOTE: this is subtle as a TB may span two physical pages */
974 if (n == 0) {
975 /* NOTE: tb_end may be after the end of the page, but
976 it is not a problem */
977 tb_start = tb->pc & ~TARGET_PAGE_MASK;
978 tb_end = tb_start + tb->size;
979 if (tb_end > TARGET_PAGE_SIZE)
980 tb_end = TARGET_PAGE_SIZE;
981 } else {
982 tb_start = 0;
983 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
984 }
985 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
986 tb = tb->page_next[n];
987 }
988}
989
2e70f6ef
PB
990TranslationBlock *tb_gen_code(CPUState *env,
991 target_ulong pc, target_ulong cs_base,
992 int flags, int cflags)
d720b93d
FB
993{
994 TranslationBlock *tb;
995 uint8_t *tc_ptr;
41c1b1c9
PB
996 tb_page_addr_t phys_pc, phys_page2;
997 target_ulong virt_page2;
d720b93d
FB
998 int code_gen_size;
999
41c1b1c9 1000 phys_pc = get_page_addr_code(env, pc);
c27004ec 1001 tb = tb_alloc(pc);
d720b93d
FB
1002 if (!tb) {
1003 /* flush must be done */
1004 tb_flush(env);
1005 /* cannot fail at this point */
c27004ec 1006 tb = tb_alloc(pc);
2e70f6ef
PB
1007 /* Don't forget to invalidate previous TB info. */
1008 tb_invalidated_flag = 1;
d720b93d
FB
1009 }
1010 tc_ptr = code_gen_ptr;
1011 tb->tc_ptr = tc_ptr;
1012 tb->cs_base = cs_base;
1013 tb->flags = flags;
1014 tb->cflags = cflags;
d07bde88 1015 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1016 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1017
d720b93d 1018 /* check next page if needed */
c27004ec 1019 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1020 phys_page2 = -1;
c27004ec 1021 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1022 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1023 }
41c1b1c9 1024 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1025 return tb;
d720b93d 1026}
3b46e624 1027
9fa3e853
FB
1028/* invalidate all TBs which intersect with the target physical page
1029 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1030 the same physical page. 'is_cpu_write_access' should be true if called
1031 from a real cpu write access: the virtual CPU will exit the current
1032 TB if code is modified inside this TB. */
41c1b1c9 1033void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1034 int is_cpu_write_access)
1035{
6b917547 1036 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1037 CPUState *env = cpu_single_env;
41c1b1c9 1038 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1039 PageDesc *p;
1040 int n;
1041#ifdef TARGET_HAS_PRECISE_SMC
1042 int current_tb_not_found = is_cpu_write_access;
1043 TranslationBlock *current_tb = NULL;
1044 int current_tb_modified = 0;
1045 target_ulong current_pc = 0;
1046 target_ulong current_cs_base = 0;
1047 int current_flags = 0;
1048#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1049
1050 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1051 if (!p)
9fa3e853 1052 return;
5fafdf24 1053 if (!p->code_bitmap &&
d720b93d
FB
1054 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1055 is_cpu_write_access) {
9fa3e853
FB
1056 /* build code bitmap */
1057 build_page_bitmap(p);
1058 }
1059
1060 /* we remove all the TBs in the range [start, end[ */
1061 /* XXX: see if in some cases it could be faster to invalidate all the code */
1062 tb = p->first_tb;
1063 while (tb != NULL) {
1064 n = (long)tb & 3;
1065 tb = (TranslationBlock *)((long)tb & ~3);
1066 tb_next = tb->page_next[n];
1067 /* NOTE: this is subtle as a TB may span two physical pages */
1068 if (n == 0) {
1069 /* NOTE: tb_end may be after the end of the page, but
1070 it is not a problem */
1071 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1072 tb_end = tb_start + tb->size;
1073 } else {
1074 tb_start = tb->page_addr[1];
1075 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1076 }
1077 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_not_found) {
1080 current_tb_not_found = 0;
1081 current_tb = NULL;
2e70f6ef 1082 if (env->mem_io_pc) {
d720b93d 1083 /* now we have a real cpu fault */
2e70f6ef 1084 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1085 }
1086 }
1087 if (current_tb == tb &&
2e70f6ef 1088 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1089 /* If we are modifying the current TB, we must stop
1090 its execution. We could be more precise by checking
1091 that the modification is after the current PC, but it
1092 would require a specialized function to partially
1093 restore the CPU state */
3b46e624 1094
d720b93d 1095 current_tb_modified = 1;
618ba8e6 1096 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1097 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1098 &current_flags);
d720b93d
FB
1099 }
1100#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1101 /* we need to do that to handle the case where a signal
1102 occurs while doing tb_phys_invalidate() */
1103 saved_tb = NULL;
1104 if (env) {
1105 saved_tb = env->current_tb;
1106 env->current_tb = NULL;
1107 }
9fa3e853 1108 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1109 if (env) {
1110 env->current_tb = saved_tb;
1111 if (env->interrupt_request && env->current_tb)
1112 cpu_interrupt(env, env->interrupt_request);
1113 }
9fa3e853
FB
1114 }
1115 tb = tb_next;
1116 }
1117#if !defined(CONFIG_USER_ONLY)
1118 /* if no code remaining, no need to continue to use slow writes */
1119 if (!p->first_tb) {
1120 invalidate_page_bitmap(p);
d720b93d 1121 if (is_cpu_write_access) {
2e70f6ef 1122 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1123 }
1124 }
1125#endif
1126#ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify
1130 itself */
ea1c1802 1131 env->current_tb = NULL;
2e70f6ef 1132 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1133 cpu_resume_from_signal(env, NULL);
9fa3e853 1134 }
fd6ce8f6 1135#endif
9fa3e853 1136}
fd6ce8f6 1137
9fa3e853 1138/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1139static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1140{
1141 PageDesc *p;
1142 int offset, b;
59817ccb 1143#if 0
a4193c8a 1144 if (1) {
93fcfe39
AL
1145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env->mem_io_vaddr, len,
1147 cpu_single_env->eip,
1148 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1149 }
1150#endif
9fa3e853 1151 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1152 if (!p)
9fa3e853
FB
1153 return;
1154 if (p->code_bitmap) {
1155 offset = start & ~TARGET_PAGE_MASK;
1156 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1157 if (b & ((1 << len) - 1))
1158 goto do_invalidate;
1159 } else {
1160 do_invalidate:
d720b93d 1161 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1162 }
1163}
1164
9fa3e853 1165#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1166static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1167 unsigned long pc, void *puc)
9fa3e853 1168{
6b917547 1169 TranslationBlock *tb;
9fa3e853 1170 PageDesc *p;
6b917547 1171 int n;
d720b93d 1172#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1173 TranslationBlock *current_tb = NULL;
d720b93d 1174 CPUState *env = cpu_single_env;
6b917547
AL
1175 int current_tb_modified = 0;
1176 target_ulong current_pc = 0;
1177 target_ulong current_cs_base = 0;
1178 int current_flags = 0;
d720b93d 1179#endif
9fa3e853
FB
1180
1181 addr &= TARGET_PAGE_MASK;
1182 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1183 if (!p)
9fa3e853
FB
1184 return;
1185 tb = p->first_tb;
d720b93d
FB
1186#ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb && pc != 0) {
1188 current_tb = tb_find_pc(pc);
1189 }
1190#endif
9fa3e853
FB
1191 while (tb != NULL) {
1192 n = (long)tb & 3;
1193 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1194#ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb == tb &&
2e70f6ef 1196 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
3b46e624 1202
d720b93d 1203 current_tb_modified = 1;
618ba8e6 1204 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1205 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1206 &current_flags);
d720b93d
FB
1207 }
1208#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1209 tb_phys_invalidate(tb, addr);
1210 tb = tb->page_next[n];
1211 }
fd6ce8f6 1212 p->first_tb = NULL;
d720b93d
FB
1213#ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify
1217 itself */
ea1c1802 1218 env->current_tb = NULL;
2e70f6ef 1219 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1220 cpu_resume_from_signal(env, puc);
1221 }
1222#endif
fd6ce8f6 1223}
9fa3e853 1224#endif
fd6ce8f6
FB
1225
1226/* add the tb in the target page and protect it if necessary */
5fafdf24 1227static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1228 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1229{
1230 PageDesc *p;
4429ab44
JQ
1231#ifndef CONFIG_USER_ONLY
1232 bool page_already_protected;
1233#endif
9fa3e853
FB
1234
1235 tb->page_addr[n] = page_addr;
5cd2c5b6 1236 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1237 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1238#ifndef CONFIG_USER_ONLY
1239 page_already_protected = p->first_tb != NULL;
1240#endif
9fa3e853
FB
1241 p->first_tb = (TranslationBlock *)((long)tb | n);
1242 invalidate_page_bitmap(p);
fd6ce8f6 1243
107db443 1244#if defined(TARGET_HAS_SMC) || 1
d720b93d 1245
9fa3e853 1246#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1247 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1248 target_ulong addr;
1249 PageDesc *p2;
9fa3e853
FB
1250 int prot;
1251
fd6ce8f6
FB
1252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
53a5960a 1254 page_addr &= qemu_host_page_mask;
fd6ce8f6 1255 prot = 0;
53a5960a
PB
1256 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1257 addr += TARGET_PAGE_SIZE) {
1258
1259 p2 = page_find (addr >> TARGET_PAGE_BITS);
1260 if (!p2)
1261 continue;
1262 prot |= p2->flags;
1263 p2->flags &= ~PAGE_WRITE;
53a5960a 1264 }
5fafdf24 1265 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1266 (prot & PAGE_BITS) & ~PAGE_WRITE);
1267#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1268 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1269 page_addr);
fd6ce8f6 1270#endif
fd6ce8f6 1271 }
9fa3e853
FB
1272#else
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
4429ab44 1276 if (!page_already_protected) {
6a00d601 1277 tlb_protect_code(page_addr);
9fa3e853
FB
1278 }
1279#endif
d720b93d
FB
1280
1281#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1282}
1283
9fa3e853
FB
1284/* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1286void tb_link_page(TranslationBlock *tb,
1287 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1288{
9fa3e853
FB
1289 unsigned int h;
1290 TranslationBlock **ptb;
1291
c8a706fe
PB
1292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1294 mmap_lock();
9fa3e853
FB
1295 /* add in the physical hash table */
1296 h = tb_phys_hash_func(phys_pc);
1297 ptb = &tb_phys_hash[h];
1298 tb->phys_hash_next = *ptb;
1299 *ptb = tb;
fd6ce8f6
FB
1300
1301 /* add in the page list */
9fa3e853
FB
1302 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1303 if (phys_page2 != -1)
1304 tb_alloc_page(tb, 1, phys_page2);
1305 else
1306 tb->page_addr[1] = -1;
9fa3e853 1307
d4e8164f
FB
1308 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1309 tb->jmp_next[0] = NULL;
1310 tb->jmp_next[1] = NULL;
1311
1312 /* init original jump addresses */
1313 if (tb->tb_next_offset[0] != 0xffff)
1314 tb_reset_jump(tb, 0);
1315 if (tb->tb_next_offset[1] != 0xffff)
1316 tb_reset_jump(tb, 1);
8a40a180
FB
1317
1318#ifdef DEBUG_TB_CHECK
1319 tb_page_check();
1320#endif
c8a706fe 1321 mmap_unlock();
fd6ce8f6
FB
1322}
1323
9fa3e853
FB
1324/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1327{
9fa3e853
FB
1328 int m_min, m_max, m;
1329 unsigned long v;
1330 TranslationBlock *tb;
a513fe19
FB
1331
1332 if (nb_tbs <= 0)
1333 return NULL;
1334 if (tc_ptr < (unsigned long)code_gen_buffer ||
1335 tc_ptr >= (unsigned long)code_gen_ptr)
1336 return NULL;
1337 /* binary search (cf Knuth) */
1338 m_min = 0;
1339 m_max = nb_tbs - 1;
1340 while (m_min <= m_max) {
1341 m = (m_min + m_max) >> 1;
1342 tb = &tbs[m];
1343 v = (unsigned long)tb->tc_ptr;
1344 if (v == tc_ptr)
1345 return tb;
1346 else if (tc_ptr < v) {
1347 m_max = m - 1;
1348 } else {
1349 m_min = m + 1;
1350 }
5fafdf24 1351 }
a513fe19
FB
1352 return &tbs[m_max];
1353}
7501267e 1354
ea041c0e
FB
1355static void tb_reset_jump_recursive(TranslationBlock *tb);
1356
1357static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1358{
1359 TranslationBlock *tb1, *tb_next, **ptb;
1360 unsigned int n1;
1361
1362 tb1 = tb->jmp_next[n];
1363 if (tb1 != NULL) {
1364 /* find head of list */
1365 for(;;) {
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == 2)
1369 break;
1370 tb1 = tb1->jmp_next[n1];
1371 }
1372 /* we are now sure now that tb jumps to tb1 */
1373 tb_next = tb1;
1374
1375 /* remove tb from the jmp_first list */
1376 ptb = &tb_next->jmp_first;
1377 for(;;) {
1378 tb1 = *ptb;
1379 n1 = (long)tb1 & 3;
1380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1381 if (n1 == n && tb1 == tb)
1382 break;
1383 ptb = &tb1->jmp_next[n1];
1384 }
1385 *ptb = tb->jmp_next[n];
1386 tb->jmp_next[n] = NULL;
3b46e624 1387
ea041c0e
FB
1388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb, n);
1390
0124311e 1391 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1392 tb_reset_jump_recursive(tb_next);
1393 }
1394}
1395
1396static void tb_reset_jump_recursive(TranslationBlock *tb)
1397{
1398 tb_reset_jump_recursive2(tb, 0);
1399 tb_reset_jump_recursive2(tb, 1);
1400}
1401
1fddef4b 1402#if defined(TARGET_HAS_ICE)
94df27fd
PB
1403#if defined(CONFIG_USER_ONLY)
1404static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1405{
1406 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1407}
1408#else
d720b93d
FB
1409static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1410{
c227f099 1411 target_phys_addr_t addr;
9b3c35e0 1412 target_ulong pd;
c227f099 1413 ram_addr_t ram_addr;
f1f6e3b8 1414 PhysPageDesc p;
d720b93d 1415
c2f07f81
PB
1416 addr = cpu_get_phys_page_debug(env, pc);
1417 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1418 pd = p.phys_offset;
c2f07f81 1419 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1420 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1421}
c27004ec 1422#endif
94df27fd 1423#endif /* TARGET_HAS_ICE */
d720b93d 1424
c527ee8f
PB
1425#if defined(CONFIG_USER_ONLY)
1426void cpu_watchpoint_remove_all(CPUState *env, int mask)
1427
1428{
1429}
1430
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
1433{
1434 return -ENOSYS;
1435}
1436#else
6658ffb8 1437/* Add a watchpoint. */
a1d1bb31
AL
1438int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1439 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1440{
b4051334 1441 target_ulong len_mask = ~(len - 1);
c0ce998e 1442 CPUWatchpoint *wp;
6658ffb8 1443
b4051334
AL
1444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1446 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1448 return -EINVAL;
1449 }
7267c094 1450 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1451
1452 wp->vaddr = addr;
b4051334 1453 wp->len_mask = len_mask;
a1d1bb31
AL
1454 wp->flags = flags;
1455
2dc9f411 1456 /* keep all GDB-injected watchpoints in front */
c0ce998e 1457 if (flags & BP_GDB)
72cf2d4f 1458 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1459 else
72cf2d4f 1460 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1461
6658ffb8 1462 tlb_flush_page(env, addr);
a1d1bb31
AL
1463
1464 if (watchpoint)
1465 *watchpoint = wp;
1466 return 0;
6658ffb8
PB
1467}
1468
a1d1bb31
AL
1469/* Remove a specific watchpoint. */
1470int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1471 int flags)
6658ffb8 1472{
b4051334 1473 target_ulong len_mask = ~(len - 1);
a1d1bb31 1474 CPUWatchpoint *wp;
6658ffb8 1475
72cf2d4f 1476 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1477 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1478 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1479 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1480 return 0;
1481 }
1482 }
a1d1bb31 1483 return -ENOENT;
6658ffb8
PB
1484}
1485
a1d1bb31
AL
1486/* Remove a specific watchpoint by reference. */
1487void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1488{
72cf2d4f 1489 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1490
a1d1bb31
AL
1491 tlb_flush_page(env, watchpoint->vaddr);
1492
7267c094 1493 g_free(watchpoint);
a1d1bb31
AL
1494}
1495
1496/* Remove all matching watchpoints. */
1497void cpu_watchpoint_remove_all(CPUState *env, int mask)
1498{
c0ce998e 1499 CPUWatchpoint *wp, *next;
a1d1bb31 1500
72cf2d4f 1501 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1502 if (wp->flags & mask)
1503 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1504 }
7d03f82f 1505}
c527ee8f 1506#endif
7d03f82f 1507
a1d1bb31
AL
1508/* Add a breakpoint. */
1509int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 CPUBreakpoint **breakpoint)
4c3a88a2 1511{
1fddef4b 1512#if defined(TARGET_HAS_ICE)
c0ce998e 1513 CPUBreakpoint *bp;
3b46e624 1514
7267c094 1515 bp = g_malloc(sizeof(*bp));
4c3a88a2 1516
a1d1bb31
AL
1517 bp->pc = pc;
1518 bp->flags = flags;
1519
2dc9f411 1520 /* keep all GDB-injected breakpoints in front */
c0ce998e 1521 if (flags & BP_GDB)
72cf2d4f 1522 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1523 else
72cf2d4f 1524 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1525
d720b93d 1526 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1527
1528 if (breakpoint)
1529 *breakpoint = bp;
4c3a88a2
FB
1530 return 0;
1531#else
a1d1bb31 1532 return -ENOSYS;
4c3a88a2
FB
1533#endif
1534}
1535
a1d1bb31
AL
1536/* Remove a specific breakpoint. */
1537int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1538{
7d03f82f 1539#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1540 CPUBreakpoint *bp;
1541
72cf2d4f 1542 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1543 if (bp->pc == pc && bp->flags == flags) {
1544 cpu_breakpoint_remove_by_ref(env, bp);
1545 return 0;
1546 }
7d03f82f 1547 }
a1d1bb31
AL
1548 return -ENOENT;
1549#else
1550 return -ENOSYS;
7d03f82f
EI
1551#endif
1552}
1553
a1d1bb31
AL
1554/* Remove a specific breakpoint by reference. */
1555void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1556{
1fddef4b 1557#if defined(TARGET_HAS_ICE)
72cf2d4f 1558 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1559
a1d1bb31
AL
1560 breakpoint_invalidate(env, breakpoint->pc);
1561
7267c094 1562 g_free(breakpoint);
a1d1bb31
AL
1563#endif
1564}
1565
1566/* Remove all matching breakpoints. */
1567void cpu_breakpoint_remove_all(CPUState *env, int mask)
1568{
1569#if defined(TARGET_HAS_ICE)
c0ce998e 1570 CPUBreakpoint *bp, *next;
a1d1bb31 1571
72cf2d4f 1572 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1573 if (bp->flags & mask)
1574 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1575 }
4c3a88a2
FB
1576#endif
1577}
1578
c33a346e
FB
1579/* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581void cpu_single_step(CPUState *env, int enabled)
1582{
1fddef4b 1583#if defined(TARGET_HAS_ICE)
c33a346e
FB
1584 if (env->singlestep_enabled != enabled) {
1585 env->singlestep_enabled = enabled;
e22a25c9
AL
1586 if (kvm_enabled())
1587 kvm_update_guest_debug(env, 0);
1588 else {
ccbb4d44 1589 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1590 /* XXX: only flush what is necessary */
1591 tb_flush(env);
1592 }
c33a346e
FB
1593 }
1594#endif
1595}
1596
34865134
FB
1597/* enable or disable low levels log */
1598void cpu_set_log(int log_flags)
1599{
1600 loglevel = log_flags;
1601 if (loglevel && !logfile) {
11fcfab4 1602 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1603 if (!logfile) {
1604 perror(logfilename);
1605 _exit(1);
1606 }
9fa3e853
FB
1607#if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1609 {
b55266b5 1610 static char logfile_buf[4096];
9fa3e853
FB
1611 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1612 }
daf767b1
SW
1613#elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile, NULL, _IONBF, 0);
1616#else
34865134 1617 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1618#endif
e735b91c
PB
1619 log_append = 1;
1620 }
1621 if (!loglevel && logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
34865134
FB
1624 }
1625}
1626
1627void cpu_set_log_filename(const char *filename)
1628{
1629 logfilename = strdup(filename);
e735b91c
PB
1630 if (logfile) {
1631 fclose(logfile);
1632 logfile = NULL;
1633 }
1634 cpu_set_log(loglevel);
34865134 1635}
c33a346e 1636
3098dba0 1637static void cpu_unlink_tb(CPUState *env)
ea041c0e 1638{
3098dba0
AJ
1639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1643 TranslationBlock *tb;
c227f099 1644 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1645
cab1b4bd 1646 spin_lock(&interrupt_lock);
3098dba0
AJ
1647 tb = env->current_tb;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TB */
f76cfe56 1650 if (tb) {
3098dba0
AJ
1651 env->current_tb = NULL;
1652 tb_reset_jump_recursive(tb);
be214e6c 1653 }
cab1b4bd 1654 spin_unlock(&interrupt_lock);
3098dba0
AJ
1655}
1656
97ffbd8d 1657#ifndef CONFIG_USER_ONLY
3098dba0 1658/* mask must never be zero, except for A20 change call */
ec6959d0 1659static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1660{
1661 int old_mask;
be214e6c 1662
2e70f6ef 1663 old_mask = env->interrupt_request;
68a79315 1664 env->interrupt_request |= mask;
3098dba0 1665
8edac960
AL
1666 /*
1667 * If called from iothread context, wake the target cpu in
1668 * case its halted.
1669 */
b7680cb6 1670 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1671 qemu_cpu_kick(env);
1672 return;
1673 }
8edac960 1674
2e70f6ef 1675 if (use_icount) {
266910c4 1676 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1677 if (!can_do_io(env)
be214e6c 1678 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1679 cpu_abort(env, "Raised interrupt while not in I/O function");
1680 }
2e70f6ef 1681 } else {
3098dba0 1682 cpu_unlink_tb(env);
ea041c0e
FB
1683 }
1684}
1685
ec6959d0
JK
1686CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1687
97ffbd8d
JK
1688#else /* CONFIG_USER_ONLY */
1689
1690void cpu_interrupt(CPUState *env, int mask)
1691{
1692 env->interrupt_request |= mask;
1693 cpu_unlink_tb(env);
1694}
1695#endif /* CONFIG_USER_ONLY */
1696
b54ad049
FB
1697void cpu_reset_interrupt(CPUState *env, int mask)
1698{
1699 env->interrupt_request &= ~mask;
1700}
1701
3098dba0
AJ
1702void cpu_exit(CPUState *env)
1703{
1704 env->exit_request = 1;
1705 cpu_unlink_tb(env);
1706}
1707
c7cd6a37 1708const CPULogItem cpu_log_items[] = {
5fafdf24 1709 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM, "in_asm",
1712 "show target assembly code for each compiled TB" },
5fafdf24 1713 { CPU_LOG_TB_OP, "op",
57fec1fe 1714 "show micro ops for each compiled TB" },
f193c797 1715 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1716 "show micro ops "
1717#ifdef TARGET_I386
1718 "before eflags optimization and "
f193c797 1719#endif
e01a1157 1720 "after liveness analysis" },
f193c797
FB
1721 { CPU_LOG_INT, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC, "exec",
1724 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1725 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1726 "show CPU state before block translation" },
f193c797
FB
1727#ifdef TARGET_I386
1728 { CPU_LOG_PCALL, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1730 { CPU_LOG_RESET, "cpu_reset",
1731 "show CPU state before CPU resets" },
f193c797 1732#endif
8e3a9fd2 1733#ifdef DEBUG_IOPORT
fd872598
FB
1734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
8e3a9fd2 1736#endif
f193c797
FB
1737 { 0, NULL, NULL },
1738};
1739
1740static int cmp1(const char *s1, int n, const char *s2)
1741{
1742 if (strlen(s2) != n)
1743 return 0;
1744 return memcmp(s1, s2, n) == 0;
1745}
3b46e624 1746
f193c797
FB
1747/* takes a comma separated list of log masks. Return 0 if error. */
1748int cpu_str_to_log_mask(const char *str)
1749{
c7cd6a37 1750 const CPULogItem *item;
f193c797
FB
1751 int mask;
1752 const char *p, *p1;
1753
1754 p = str;
1755 mask = 0;
1756 for(;;) {
1757 p1 = strchr(p, ',');
1758 if (!p1)
1759 p1 = p + strlen(p);
9742bf26
YT
1760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1762 mask |= item->mask;
1763 }
1764 } else {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1767 goto found;
1768 }
1769 return 0;
f193c797 1770 }
f193c797
FB
1771 found:
1772 mask |= item->mask;
1773 if (*p1 != ',')
1774 break;
1775 p = p1 + 1;
1776 }
1777 return mask;
1778}
ea041c0e 1779
7501267e
FB
1780void cpu_abort(CPUState *env, const char *fmt, ...)
1781{
1782 va_list ap;
493ae1f0 1783 va_list ap2;
7501267e
FB
1784
1785 va_start(ap, fmt);
493ae1f0 1786 va_copy(ap2, ap);
7501267e
FB
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790#ifdef TARGET_I386
7fe48483
FB
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792#else
1793 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1794#endif
93fcfe39
AL
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1798 qemu_log("\n");
f9373291 1799#ifdef TARGET_I386
93fcfe39 1800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1801#else
93fcfe39 1802 log_cpu_state(env, 0);
f9373291 1803#endif
31b1a7b4 1804 qemu_log_flush();
93fcfe39 1805 qemu_log_close();
924edcae 1806 }
493ae1f0 1807 va_end(ap2);
f9373291 1808 va_end(ap);
fd052bf6
RV
1809#if defined(CONFIG_USER_ONLY)
1810 {
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1815 }
1816#endif
7501267e
FB
1817 abort();
1818}
1819
c5be9f08
TS
1820CPUState *cpu_copy(CPUState *env)
1821{
01ba9816 1822 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
5a38f081
AL
1825#if defined(TARGET_HAS_ICE)
1826 CPUBreakpoint *bp;
1827 CPUWatchpoint *wp;
1828#endif
1829
c5be9f08 1830 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1831
1832 /* Preserve chaining and index. */
c5be9f08
TS
1833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
5a38f081
AL
1835
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
5a38f081 1841#if defined(TARGET_HAS_ICE)
72cf2d4f 1842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1844 }
72cf2d4f 1845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1847 wp->flags, NULL);
1848 }
1849#endif
1850
c5be9f08
TS
1851 return new_env;
1852}
1853
0124311e
FB
1854#if !defined(CONFIG_USER_ONLY)
1855
5c751e99
EI
1856static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1857{
1858 unsigned int i;
1859
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1865
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1869}
1870
08738984
IK
1871static CPUTLBEntry s_cputlb_empty_entry = {
1872 .addr_read = -1,
1873 .addr_write = -1,
1874 .addr_code = -1,
1875 .addend = -1,
1876};
1877
ee8b7021
FB
1878/* NOTE: if flush_global is true, also flush global entries (not
1879 implemented yet) */
1880void tlb_flush(CPUState *env, int flush_global)
33417e70 1881{
33417e70 1882 int i;
0124311e 1883
9fa3e853
FB
1884#if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886#endif
0124311e
FB
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
33417e70 1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1895 }
33417e70 1896 }
9fa3e853 1897
8a40a180 1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1899
d4c430a8
PB
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
e3db7226 1902 tlb_flush_count++;
33417e70
FB
1903}
1904
274da6b2 1905static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1906{
5fafdf24 1907 if (addr == (tlb_entry->addr_read &
84b7b8e7 1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1909 addr == (tlb_entry->addr_write &
84b7b8e7 1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1911 addr == (tlb_entry->addr_code &
84b7b8e7 1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1913 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1914 }
61382a50
FB
1915}
1916
2e12669a 1917void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1918{
8a40a180 1919 int i;
cfde4bd9 1920 int mmu_idx;
0124311e 1921
9fa3e853 1922#if defined(DEBUG_TLB)
108c49b8 1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1924#endif
d4c430a8
PB
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927#if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931#endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
0124311e
FB
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
61382a50
FB
1938
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1943
5c751e99 1944 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1945}
1946
9fa3e853
FB
1947/* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
c227f099 1949static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1950{
5fafdf24 1951 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
9fa3e853
FB
1954}
1955
9fa3e853 1956/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1957 tested for self modifying code */
c227f099 1958static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1959 target_ulong vaddr)
9fa3e853 1960{
f7c11b53 1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1962}
1963
5fafdf24 1964static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1965 unsigned long start, unsigned long length)
1966{
1967 unsigned long addr;
84b7b8e7
FB
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1970 if ((addr - start) < length) {
0f459d16 1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1972 }
1973 }
1974}
1975
5579c7f3 1976/* Note: start and end must be within the same ram block. */
c227f099 1977void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1978 int dirty_flags)
1ccde1cb
FB
1979{
1980 CPUState *env;
4f2ac237 1981 unsigned long length, start1;
f7c11b53 1982 int i;
1ccde1cb
FB
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
f7c11b53 1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1991
1ccde1cb
FB
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
b2e0a138 1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1995 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1996 address comparisons below. */
b2e0a138 1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1998 != (end - 1) - start) {
1999 abort();
2000 }
2001
6a00d601 2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
6a00d601 2009 }
1ccde1cb
FB
2010}
2011
74576198
AL
2012int cpu_physical_memory_set_dirty_tracking(int enable)
2013{
f6f3fbca 2014 int ret = 0;
74576198 2015 in_migration = enable;
f6f3fbca 2016 return ret;
74576198
AL
2017}
2018
3a7d929e
FB
2019static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020{
c227f099 2021 ram_addr_t ram_addr;
5579c7f3 2022 void *p;
3a7d929e 2023
84b7b8e7 2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
e890261f 2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2030 }
2031 }
2032}
2033
2034/* update the TLB according to the current state of the dirty bits */
2035void cpu_tlb_update_dirty(CPUState *env)
2036{
2037 int i;
cfde4bd9
IY
2038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
3a7d929e
FB
2043}
2044
0f459d16 2045static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2046{
0f459d16
PB
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2049}
2050
0f459d16
PB
2051/* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2054{
1ccde1cb 2055 int i;
cfde4bd9 2056 int mmu_idx;
1ccde1cb 2057
0f459d16 2058 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2062}
2063
d4c430a8
PB
2064/* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068{
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085}
2086
2087/* Add a new TLB entry. At most one entry for a given virtual address
2088 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2089 supplied size is only used by tlb_flush_page. */
2090void tlb_set_page(CPUState *env, target_ulong vaddr,
2091 target_phys_addr_t paddr, int prot,
2092 int mmu_idx, target_ulong size)
9fa3e853 2093{
f1f6e3b8 2094 PhysPageDesc p;
4f2ac237 2095 unsigned long pd;
9fa3e853 2096 unsigned int index;
4f2ac237 2097 target_ulong address;
0f459d16 2098 target_ulong code_address;
355b1943 2099 unsigned long addend;
84b7b8e7 2100 CPUTLBEntry *te;
a1d1bb31 2101 CPUWatchpoint *wp;
c227f099 2102 target_phys_addr_t iotlb;
9fa3e853 2103
d4c430a8
PB
2104 assert(size >= TARGET_PAGE_SIZE);
2105 if (size != TARGET_PAGE_SIZE) {
2106 tlb_add_large_page(env, vaddr, size);
2107 }
92e873b9 2108 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2109 pd = p.phys_offset;
9fa3e853 2110#if defined(DEBUG_TLB)
7fd3f494
SW
2111 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2112 " prot=%x idx=%d pd=0x%08lx\n",
2113 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2114#endif
2115
0f459d16
PB
2116 address = vaddr;
2117 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2118 /* IO memory case (romd handled later) */
2119 address |= TLB_MMIO;
2120 }
5579c7f3 2121 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2122 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2123 /* Normal RAM. */
2124 iotlb = pd & TARGET_PAGE_MASK;
2125 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2126 iotlb |= IO_MEM_NOTDIRTY;
2127 else
2128 iotlb |= IO_MEM_ROM;
2129 } else {
ccbb4d44 2130 /* IO handlers are currently passed a physical address.
0f459d16
PB
2131 It would be nice to pass an offset from the base address
2132 of that region. This would avoid having to special case RAM,
2133 and avoid full address decoding in every device.
2134 We can't use the high bits of pd for this because
2135 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2136 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2137 iotlb += p.region_offset;
0f459d16
PB
2138 }
2139
2140 code_address = address;
2141 /* Make accesses to pages with watchpoints go via the
2142 watchpoint trap routines. */
72cf2d4f 2143 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2144 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2145 /* Avoid trapping reads of pages with a write breakpoint. */
2146 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2147 iotlb = io_mem_watch + paddr;
2148 address |= TLB_MMIO;
2149 break;
2150 }
6658ffb8 2151 }
0f459d16 2152 }
d79acba4 2153
0f459d16
PB
2154 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2155 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2156 te = &env->tlb_table[mmu_idx][index];
2157 te->addend = addend - vaddr;
2158 if (prot & PAGE_READ) {
2159 te->addr_read = address;
2160 } else {
2161 te->addr_read = -1;
2162 }
5c751e99 2163
0f459d16
PB
2164 if (prot & PAGE_EXEC) {
2165 te->addr_code = code_address;
2166 } else {
2167 te->addr_code = -1;
2168 }
2169 if (prot & PAGE_WRITE) {
2170 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2171 (pd & IO_MEM_ROMD)) {
2172 /* Write access calls the I/O callback. */
2173 te->addr_write = address | TLB_MMIO;
2174 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2175 !cpu_physical_memory_is_dirty(pd)) {
2176 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2177 } else {
0f459d16 2178 te->addr_write = address;
9fa3e853 2179 }
0f459d16
PB
2180 } else {
2181 te->addr_write = -1;
9fa3e853 2182 }
9fa3e853
FB
2183}
2184
0124311e
FB
2185#else
2186
ee8b7021 2187void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2188{
2189}
2190
2e12669a 2191void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2192{
2193}
2194
edf8e2af
MW
2195/*
2196 * Walks guest process memory "regions" one by one
2197 * and calls callback function 'fn' for each region.
2198 */
5cd2c5b6
RH
2199
2200struct walk_memory_regions_data
2201{
2202 walk_memory_regions_fn fn;
2203 void *priv;
2204 unsigned long start;
2205 int prot;
2206};
2207
2208static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2209 abi_ulong end, int new_prot)
5cd2c5b6
RH
2210{
2211 if (data->start != -1ul) {
2212 int rc = data->fn(data->priv, data->start, end, data->prot);
2213 if (rc != 0) {
2214 return rc;
2215 }
2216 }
2217
2218 data->start = (new_prot ? end : -1ul);
2219 data->prot = new_prot;
2220
2221 return 0;
2222}
2223
2224static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2225 abi_ulong base, int level, void **lp)
5cd2c5b6 2226{
b480d9b7 2227 abi_ulong pa;
5cd2c5b6
RH
2228 int i, rc;
2229
2230 if (*lp == NULL) {
2231 return walk_memory_regions_end(data, base, 0);
2232 }
2233
2234 if (level == 0) {
2235 PageDesc *pd = *lp;
7296abac 2236 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2237 int prot = pd[i].flags;
2238
2239 pa = base | (i << TARGET_PAGE_BITS);
2240 if (prot != data->prot) {
2241 rc = walk_memory_regions_end(data, pa, prot);
2242 if (rc != 0) {
2243 return rc;
9fa3e853 2244 }
9fa3e853 2245 }
5cd2c5b6
RH
2246 }
2247 } else {
2248 void **pp = *lp;
7296abac 2249 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2250 pa = base | ((abi_ulong)i <<
2251 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2252 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2253 if (rc != 0) {
2254 return rc;
2255 }
2256 }
2257 }
2258
2259 return 0;
2260}
2261
2262int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2263{
2264 struct walk_memory_regions_data data;
2265 unsigned long i;
2266
2267 data.fn = fn;
2268 data.priv = priv;
2269 data.start = -1ul;
2270 data.prot = 0;
2271
2272 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2273 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2274 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2275 if (rc != 0) {
2276 return rc;
9fa3e853 2277 }
33417e70 2278 }
5cd2c5b6
RH
2279
2280 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2281}
2282
b480d9b7
PB
2283static int dump_region(void *priv, abi_ulong start,
2284 abi_ulong end, unsigned long prot)
edf8e2af
MW
2285{
2286 FILE *f = (FILE *)priv;
2287
b480d9b7
PB
2288 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2289 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2290 start, end, end - start,
2291 ((prot & PAGE_READ) ? 'r' : '-'),
2292 ((prot & PAGE_WRITE) ? 'w' : '-'),
2293 ((prot & PAGE_EXEC) ? 'x' : '-'));
2294
2295 return (0);
2296}
2297
2298/* dump memory mappings */
2299void page_dump(FILE *f)
2300{
2301 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2302 "start", "end", "size", "prot");
2303 walk_memory_regions(f, dump_region);
33417e70
FB
2304}
2305
53a5960a 2306int page_get_flags(target_ulong address)
33417e70 2307{
9fa3e853
FB
2308 PageDesc *p;
2309
2310 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2311 if (!p)
9fa3e853
FB
2312 return 0;
2313 return p->flags;
2314}
2315
376a7909
RH
2316/* Modify the flags of a page and invalidate the code if necessary.
2317 The flag PAGE_WRITE_ORG is positioned automatically depending
2318 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2319void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2320{
376a7909
RH
2321 target_ulong addr, len;
2322
2323 /* This function should never be called with addresses outside the
2324 guest address space. If this assert fires, it probably indicates
2325 a missing call to h2g_valid. */
b480d9b7
PB
2326#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2327 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2328#endif
2329 assert(start < end);
9fa3e853
FB
2330
2331 start = start & TARGET_PAGE_MASK;
2332 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2333
2334 if (flags & PAGE_WRITE) {
9fa3e853 2335 flags |= PAGE_WRITE_ORG;
376a7909
RH
2336 }
2337
2338 for (addr = start, len = end - start;
2339 len != 0;
2340 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2341 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2342
2343 /* If the write protection bit is set, then we invalidate
2344 the code inside. */
5fafdf24 2345 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2346 (flags & PAGE_WRITE) &&
2347 p->first_tb) {
d720b93d 2348 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2349 }
2350 p->flags = flags;
2351 }
33417e70
FB
2352}
2353
3d97b40b
TS
2354int page_check_range(target_ulong start, target_ulong len, int flags)
2355{
2356 PageDesc *p;
2357 target_ulong end;
2358 target_ulong addr;
2359
376a7909
RH
2360 /* This function should never be called with addresses outside the
2361 guest address space. If this assert fires, it probably indicates
2362 a missing call to h2g_valid. */
338e9e6c
BS
2363#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2364 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2365#endif
2366
3e0650a9
RH
2367 if (len == 0) {
2368 return 0;
2369 }
376a7909
RH
2370 if (start + len - 1 < start) {
2371 /* We've wrapped around. */
55f280c9 2372 return -1;
376a7909 2373 }
55f280c9 2374
3d97b40b
TS
2375 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2376 start = start & TARGET_PAGE_MASK;
2377
376a7909
RH
2378 for (addr = start, len = end - start;
2379 len != 0;
2380 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2381 p = page_find(addr >> TARGET_PAGE_BITS);
2382 if( !p )
2383 return -1;
2384 if( !(p->flags & PAGE_VALID) )
2385 return -1;
2386
dae3270c 2387 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2388 return -1;
dae3270c
FB
2389 if (flags & PAGE_WRITE) {
2390 if (!(p->flags & PAGE_WRITE_ORG))
2391 return -1;
2392 /* unprotect the page if it was put read-only because it
2393 contains translated code */
2394 if (!(p->flags & PAGE_WRITE)) {
2395 if (!page_unprotect(addr, 0, NULL))
2396 return -1;
2397 }
2398 return 0;
2399 }
3d97b40b
TS
2400 }
2401 return 0;
2402}
2403
9fa3e853 2404/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2405 page. Return TRUE if the fault was successfully handled. */
53a5960a 2406int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2407{
45d679d6
AJ
2408 unsigned int prot;
2409 PageDesc *p;
53a5960a 2410 target_ulong host_start, host_end, addr;
9fa3e853 2411
c8a706fe
PB
2412 /* Technically this isn't safe inside a signal handler. However we
2413 know this only ever happens in a synchronous SEGV handler, so in
2414 practice it seems to be ok. */
2415 mmap_lock();
2416
45d679d6
AJ
2417 p = page_find(address >> TARGET_PAGE_BITS);
2418 if (!p) {
c8a706fe 2419 mmap_unlock();
9fa3e853 2420 return 0;
c8a706fe 2421 }
45d679d6 2422
9fa3e853
FB
2423 /* if the page was really writable, then we change its
2424 protection back to writable */
45d679d6
AJ
2425 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2426 host_start = address & qemu_host_page_mask;
2427 host_end = host_start + qemu_host_page_size;
2428
2429 prot = 0;
2430 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2431 p = page_find(addr >> TARGET_PAGE_BITS);
2432 p->flags |= PAGE_WRITE;
2433 prot |= p->flags;
2434
9fa3e853
FB
2435 /* and since the content will be modified, we must invalidate
2436 the corresponding translated code. */
45d679d6 2437 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2438#ifdef DEBUG_TB_CHECK
45d679d6 2439 tb_invalidate_check(addr);
9fa3e853 2440#endif
9fa3e853 2441 }
45d679d6
AJ
2442 mprotect((void *)g2h(host_start), qemu_host_page_size,
2443 prot & PAGE_BITS);
2444
2445 mmap_unlock();
2446 return 1;
9fa3e853 2447 }
c8a706fe 2448 mmap_unlock();
9fa3e853
FB
2449 return 0;
2450}
2451
6a00d601
FB
2452static inline void tlb_set_dirty(CPUState *env,
2453 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2454{
2455}
9fa3e853
FB
2456#endif /* defined(CONFIG_USER_ONLY) */
2457
e2eef170 2458#if !defined(CONFIG_USER_ONLY)
8da3ff18 2459
c04b2b78
PB
2460#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2461typedef struct subpage_t {
2462 target_phys_addr_t base;
f6405247
RH
2463 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2464 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2465} subpage_t;
2466
c227f099
AL
2467static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2468 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2469static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2470 ram_addr_t orig_memory,
2471 ram_addr_t region_offset);
db7b5426
BS
2472#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2473 need_subpage) \
2474 do { \
2475 if (addr > start_addr) \
2476 start_addr2 = 0; \
2477 else { \
2478 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2479 if (start_addr2 > 0) \
2480 need_subpage = 1; \
2481 } \
2482 \
49e9fba2 2483 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2484 end_addr2 = TARGET_PAGE_SIZE - 1; \
2485 else { \
2486 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2487 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2488 need_subpage = 1; \
2489 } \
2490 } while (0)
2491
8f2498f9
MT
2492/* register physical memory.
2493 For RAM, 'size' must be a multiple of the target page size.
2494 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2495 io memory page. The address used when calling the IO function is
2496 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2497 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2498 before calculating this offset. This should not be a problem unless
2499 the low bits of start_addr and region_offset differ. */
0fd542fb 2500void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2501 ram_addr_t size,
2502 ram_addr_t phys_offset,
0fd542fb
MT
2503 ram_addr_t region_offset,
2504 bool log_dirty)
33417e70 2505{
c227f099 2506 target_phys_addr_t addr, end_addr;
92e873b9 2507 PhysPageDesc *p;
9d42037b 2508 CPUState *env;
c227f099 2509 ram_addr_t orig_size = size;
f6405247 2510 subpage_t *subpage;
33417e70 2511
3b8e6a2d 2512 assert(size);
f6f3fbca 2513
67c4d23c
PB
2514 if (phys_offset == IO_MEM_UNASSIGNED) {
2515 region_offset = start_addr;
2516 }
8da3ff18 2517 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2518 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2519 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2520
2521 addr = start_addr;
2522 do {
f1f6e3b8 2523 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
db7b5426 2524 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2525 ram_addr_t orig_memory = p->phys_offset;
2526 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2527 int need_subpage = 0;
2528
2529 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2530 need_subpage);
f6405247 2531 if (need_subpage) {
db7b5426
BS
2532 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2533 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2534 &p->phys_offset, orig_memory,
2535 p->region_offset);
db7b5426
BS
2536 } else {
2537 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2538 >> IO_MEM_SHIFT];
2539 }
8da3ff18
PB
2540 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2541 region_offset);
2542 p->region_offset = 0;
db7b5426
BS
2543 } else {
2544 p->phys_offset = phys_offset;
2774c6d0 2545 p->region_offset = region_offset;
db7b5426
BS
2546 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2547 (phys_offset & IO_MEM_ROMD))
2548 phys_offset += TARGET_PAGE_SIZE;
2549 }
2550 } else {
2551 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2552 p->phys_offset = phys_offset;
8da3ff18 2553 p->region_offset = region_offset;
db7b5426 2554 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2555 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2556 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2557 } else {
c227f099 2558 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2559 int need_subpage = 0;
2560
2561 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2562 end_addr2, need_subpage);
2563
f6405247 2564 if (need_subpage) {
db7b5426 2565 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2566 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2567 addr & TARGET_PAGE_MASK);
db7b5426 2568 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2569 phys_offset, region_offset);
2570 p->region_offset = 0;
db7b5426
BS
2571 }
2572 }
2573 }
8da3ff18 2574 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2575 addr += TARGET_PAGE_SIZE;
2576 } while (addr != end_addr);
3b46e624 2577
9d42037b
FB
2578 /* since each CPU stores ram addresses in its TLB cache, we must
2579 reset the modified entries */
2580 /* XXX: slow ! */
2581 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2582 tlb_flush(env, 1);
2583 }
33417e70
FB
2584}
2585
c227f099 2586void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2587{
2588 if (kvm_enabled())
2589 kvm_coalesce_mmio_region(addr, size);
2590}
2591
c227f099 2592void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2593{
2594 if (kvm_enabled())
2595 kvm_uncoalesce_mmio_region(addr, size);
2596}
2597
62a2744c
SY
2598void qemu_flush_coalesced_mmio_buffer(void)
2599{
2600 if (kvm_enabled())
2601 kvm_flush_coalesced_mmio_buffer();
2602}
2603
c902760f
MT
2604#if defined(__linux__) && !defined(TARGET_S390X)
2605
2606#include <sys/vfs.h>
2607
2608#define HUGETLBFS_MAGIC 0x958458f6
2609
2610static long gethugepagesize(const char *path)
2611{
2612 struct statfs fs;
2613 int ret;
2614
2615 do {
9742bf26 2616 ret = statfs(path, &fs);
c902760f
MT
2617 } while (ret != 0 && errno == EINTR);
2618
2619 if (ret != 0) {
9742bf26
YT
2620 perror(path);
2621 return 0;
c902760f
MT
2622 }
2623
2624 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2625 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2626
2627 return fs.f_bsize;
2628}
2629
04b16653
AW
2630static void *file_ram_alloc(RAMBlock *block,
2631 ram_addr_t memory,
2632 const char *path)
c902760f
MT
2633{
2634 char *filename;
2635 void *area;
2636 int fd;
2637#ifdef MAP_POPULATE
2638 int flags;
2639#endif
2640 unsigned long hpagesize;
2641
2642 hpagesize = gethugepagesize(path);
2643 if (!hpagesize) {
9742bf26 2644 return NULL;
c902760f
MT
2645 }
2646
2647 if (memory < hpagesize) {
2648 return NULL;
2649 }
2650
2651 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2652 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2653 return NULL;
2654 }
2655
2656 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2657 return NULL;
c902760f
MT
2658 }
2659
2660 fd = mkstemp(filename);
2661 if (fd < 0) {
9742bf26
YT
2662 perror("unable to create backing store for hugepages");
2663 free(filename);
2664 return NULL;
c902760f
MT
2665 }
2666 unlink(filename);
2667 free(filename);
2668
2669 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2670
2671 /*
2672 * ftruncate is not supported by hugetlbfs in older
2673 * hosts, so don't bother bailing out on errors.
2674 * If anything goes wrong with it under other filesystems,
2675 * mmap will fail.
2676 */
2677 if (ftruncate(fd, memory))
9742bf26 2678 perror("ftruncate");
c902760f
MT
2679
2680#ifdef MAP_POPULATE
2681 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2682 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2683 * to sidestep this quirk.
2684 */
2685 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2686 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2687#else
2688 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2689#endif
2690 if (area == MAP_FAILED) {
9742bf26
YT
2691 perror("file_ram_alloc: can't mmap RAM pages");
2692 close(fd);
2693 return (NULL);
c902760f 2694 }
04b16653 2695 block->fd = fd;
c902760f
MT
2696 return area;
2697}
2698#endif
2699
d17b5288 2700static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2701{
2702 RAMBlock *block, *next_block;
3e837b2c 2703 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2704
2705 if (QLIST_EMPTY(&ram_list.blocks))
2706 return 0;
2707
2708 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2709 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2710
2711 end = block->offset + block->length;
2712
2713 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2714 if (next_block->offset >= end) {
2715 next = MIN(next, next_block->offset);
2716 }
2717 }
2718 if (next - end >= size && next - end < mingap) {
3e837b2c 2719 offset = end;
04b16653
AW
2720 mingap = next - end;
2721 }
2722 }
3e837b2c
AW
2723
2724 if (offset == RAM_ADDR_MAX) {
2725 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2726 (uint64_t)size);
2727 abort();
2728 }
2729
04b16653
AW
2730 return offset;
2731}
2732
2733static ram_addr_t last_ram_offset(void)
d17b5288
AW
2734{
2735 RAMBlock *block;
2736 ram_addr_t last = 0;
2737
2738 QLIST_FOREACH(block, &ram_list.blocks, next)
2739 last = MAX(last, block->offset + block->length);
2740
2741 return last;
2742}
2743
c5705a77 2744void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2745{
2746 RAMBlock *new_block, *block;
2747
c5705a77
AK
2748 new_block = NULL;
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
2750 if (block->offset == addr) {
2751 new_block = block;
2752 break;
2753 }
2754 }
2755 assert(new_block);
2756 assert(!new_block->idstr[0]);
84b89d78
CM
2757
2758 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2759 char *id = dev->parent_bus->info->get_dev_path(dev);
2760 if (id) {
2761 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2762 g_free(id);
84b89d78
CM
2763 }
2764 }
2765 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2766
2767 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2768 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2769 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2770 new_block->idstr);
2771 abort();
2772 }
2773 }
c5705a77
AK
2774}
2775
2776ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2777 MemoryRegion *mr)
2778{
2779 RAMBlock *new_block;
2780
2781 size = TARGET_PAGE_ALIGN(size);
2782 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2783
7c637366 2784 new_block->mr = mr;
432d268c 2785 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2786 if (host) {
2787 new_block->host = host;
cd19cfa2 2788 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2789 } else {
2790 if (mem_path) {
c902760f 2791#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2792 new_block->host = file_ram_alloc(new_block, size, mem_path);
2793 if (!new_block->host) {
2794 new_block->host = qemu_vmalloc(size);
e78815a5 2795 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2796 }
c902760f 2797#else
6977dfe6
YT
2798 fprintf(stderr, "-mem-path option unsupported\n");
2799 exit(1);
c902760f 2800#endif
6977dfe6 2801 } else {
6b02494d 2802#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2803 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2804 an system defined value, which is at least 256GB. Larger systems
2805 have larger values. We put the guest between the end of data
2806 segment (system break) and this value. We use 32GB as a base to
2807 have enough room for the system break to grow. */
2808 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2809 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2810 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2811 if (new_block->host == MAP_FAILED) {
2812 fprintf(stderr, "Allocating RAM failed\n");
2813 abort();
2814 }
6b02494d 2815#else
868bb33f 2816 if (xen_enabled()) {
fce537d4 2817 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2818 } else {
2819 new_block->host = qemu_vmalloc(size);
2820 }
6b02494d 2821#endif
e78815a5 2822 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2823 }
c902760f 2824 }
94a6b54f
PB
2825 new_block->length = size;
2826
f471a17e 2827 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2828
7267c094 2829 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2830 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2831 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2832 0xff, size >> TARGET_PAGE_BITS);
2833
6f0437e8
JK
2834 if (kvm_enabled())
2835 kvm_setup_guest_memory(new_block->host, size);
2836
94a6b54f
PB
2837 return new_block->offset;
2838}
e9a1ab19 2839
c5705a77 2840ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2841{
c5705a77 2842 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2843}
2844
1f2e98b6
AW
2845void qemu_ram_free_from_ptr(ram_addr_t addr)
2846{
2847 RAMBlock *block;
2848
2849 QLIST_FOREACH(block, &ram_list.blocks, next) {
2850 if (addr == block->offset) {
2851 QLIST_REMOVE(block, next);
7267c094 2852 g_free(block);
1f2e98b6
AW
2853 return;
2854 }
2855 }
2856}
2857
c227f099 2858void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2859{
04b16653
AW
2860 RAMBlock *block;
2861
2862 QLIST_FOREACH(block, &ram_list.blocks, next) {
2863 if (addr == block->offset) {
2864 QLIST_REMOVE(block, next);
cd19cfa2
HY
2865 if (block->flags & RAM_PREALLOC_MASK) {
2866 ;
2867 } else if (mem_path) {
04b16653
AW
2868#if defined (__linux__) && !defined(TARGET_S390X)
2869 if (block->fd) {
2870 munmap(block->host, block->length);
2871 close(block->fd);
2872 } else {
2873 qemu_vfree(block->host);
2874 }
fd28aa13
JK
2875#else
2876 abort();
04b16653
AW
2877#endif
2878 } else {
2879#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2880 munmap(block->host, block->length);
2881#else
868bb33f 2882 if (xen_enabled()) {
e41d7c69 2883 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2884 } else {
2885 qemu_vfree(block->host);
2886 }
04b16653
AW
2887#endif
2888 }
7267c094 2889 g_free(block);
04b16653
AW
2890 return;
2891 }
2892 }
2893
e9a1ab19
FB
2894}
2895
cd19cfa2
HY
2896#ifndef _WIN32
2897void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2898{
2899 RAMBlock *block;
2900 ram_addr_t offset;
2901 int flags;
2902 void *area, *vaddr;
2903
2904 QLIST_FOREACH(block, &ram_list.blocks, next) {
2905 offset = addr - block->offset;
2906 if (offset < block->length) {
2907 vaddr = block->host + offset;
2908 if (block->flags & RAM_PREALLOC_MASK) {
2909 ;
2910 } else {
2911 flags = MAP_FIXED;
2912 munmap(vaddr, length);
2913 if (mem_path) {
2914#if defined(__linux__) && !defined(TARGET_S390X)
2915 if (block->fd) {
2916#ifdef MAP_POPULATE
2917 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2918 MAP_PRIVATE;
2919#else
2920 flags |= MAP_PRIVATE;
2921#endif
2922 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2923 flags, block->fd, offset);
2924 } else {
2925 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2926 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2927 flags, -1, 0);
2928 }
fd28aa13
JK
2929#else
2930 abort();
cd19cfa2
HY
2931#endif
2932 } else {
2933#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2934 flags |= MAP_SHARED | MAP_ANONYMOUS;
2935 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2936 flags, -1, 0);
2937#else
2938 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2939 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2940 flags, -1, 0);
2941#endif
2942 }
2943 if (area != vaddr) {
f15fbc4b
AP
2944 fprintf(stderr, "Could not remap addr: "
2945 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2946 length, addr);
2947 exit(1);
2948 }
2949 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2950 }
2951 return;
2952 }
2953 }
2954}
2955#endif /* !_WIN32 */
2956
dc828ca1 2957/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2958 With the exception of the softmmu code in this file, this should
2959 only be used for local memory (e.g. video ram) that the device owns,
2960 and knows it isn't going to access beyond the end of the block.
2961
2962 It should not be used for general purpose DMA.
2963 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2964 */
c227f099 2965void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2966{
94a6b54f
PB
2967 RAMBlock *block;
2968
f471a17e
AW
2969 QLIST_FOREACH(block, &ram_list.blocks, next) {
2970 if (addr - block->offset < block->length) {
7d82af38
VP
2971 /* Move this entry to to start of the list. */
2972 if (block != QLIST_FIRST(&ram_list.blocks)) {
2973 QLIST_REMOVE(block, next);
2974 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2975 }
868bb33f 2976 if (xen_enabled()) {
432d268c
JN
2977 /* We need to check if the requested address is in the RAM
2978 * because we don't want to map the entire memory in QEMU.
712c2b41 2979 * In that case just map until the end of the page.
432d268c
JN
2980 */
2981 if (block->offset == 0) {
e41d7c69 2982 return xen_map_cache(addr, 0, 0);
432d268c 2983 } else if (block->host == NULL) {
e41d7c69
JK
2984 block->host =
2985 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2986 }
2987 }
f471a17e
AW
2988 return block->host + (addr - block->offset);
2989 }
94a6b54f 2990 }
f471a17e
AW
2991
2992 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2993 abort();
2994
2995 return NULL;
dc828ca1
PB
2996}
2997
b2e0a138
MT
2998/* Return a host pointer to ram allocated with qemu_ram_alloc.
2999 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3000 */
3001void *qemu_safe_ram_ptr(ram_addr_t addr)
3002{
3003 RAMBlock *block;
3004
3005 QLIST_FOREACH(block, &ram_list.blocks, next) {
3006 if (addr - block->offset < block->length) {
868bb33f 3007 if (xen_enabled()) {
432d268c
JN
3008 /* We need to check if the requested address is in the RAM
3009 * because we don't want to map the entire memory in QEMU.
712c2b41 3010 * In that case just map until the end of the page.
432d268c
JN
3011 */
3012 if (block->offset == 0) {
e41d7c69 3013 return xen_map_cache(addr, 0, 0);
432d268c 3014 } else if (block->host == NULL) {
e41d7c69
JK
3015 block->host =
3016 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3017 }
3018 }
b2e0a138
MT
3019 return block->host + (addr - block->offset);
3020 }
3021 }
3022
3023 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3024 abort();
3025
3026 return NULL;
3027}
3028
38bee5dc
SS
3029/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3030 * but takes a size argument */
8ab934f9 3031void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3032{
8ab934f9
SS
3033 if (*size == 0) {
3034 return NULL;
3035 }
868bb33f 3036 if (xen_enabled()) {
e41d7c69 3037 return xen_map_cache(addr, *size, 1);
868bb33f 3038 } else {
38bee5dc
SS
3039 RAMBlock *block;
3040
3041 QLIST_FOREACH(block, &ram_list.blocks, next) {
3042 if (addr - block->offset < block->length) {
3043 if (addr - block->offset + *size > block->length)
3044 *size = block->length - addr + block->offset;
3045 return block->host + (addr - block->offset);
3046 }
3047 }
3048
3049 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3050 abort();
38bee5dc
SS
3051 }
3052}
3053
050a0ddf
AP
3054void qemu_put_ram_ptr(void *addr)
3055{
3056 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3057}
3058
e890261f 3059int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3060{
94a6b54f
PB
3061 RAMBlock *block;
3062 uint8_t *host = ptr;
3063
868bb33f 3064 if (xen_enabled()) {
e41d7c69 3065 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3066 return 0;
3067 }
3068
f471a17e 3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3070 /* This case append when the block is not mapped. */
3071 if (block->host == NULL) {
3072 continue;
3073 }
f471a17e 3074 if (host - block->host < block->length) {
e890261f
MT
3075 *ram_addr = block->offset + (host - block->host);
3076 return 0;
f471a17e 3077 }
94a6b54f 3078 }
432d268c 3079
e890261f
MT
3080 return -1;
3081}
f471a17e 3082
e890261f
MT
3083/* Some of the softmmu routines need to translate from a host pointer
3084 (typically a TLB entry) back to a ram offset. */
3085ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3086{
3087 ram_addr_t ram_addr;
f471a17e 3088
e890261f
MT
3089 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3090 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3091 abort();
3092 }
3093 return ram_addr;
5579c7f3
PB
3094}
3095
c227f099 3096static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3097{
67d3b957 3098#ifdef DEBUG_UNASSIGNED
ab3d1727 3099 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3100#endif
5b450407 3101#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3102 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3103#endif
3104 return 0;
3105}
3106
c227f099 3107static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3108{
3109#ifdef DEBUG_UNASSIGNED
3110 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3111#endif
5b450407 3112#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3113 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3114#endif
3115 return 0;
3116}
3117
c227f099 3118static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3119{
3120#ifdef DEBUG_UNASSIGNED
3121 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3122#endif
5b450407 3123#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3124 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3125#endif
33417e70
FB
3126 return 0;
3127}
3128
c227f099 3129static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3130{
67d3b957 3131#ifdef DEBUG_UNASSIGNED
ab3d1727 3132 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3133#endif
5b450407 3134#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3135 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3136#endif
3137}
3138
c227f099 3139static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3140{
3141#ifdef DEBUG_UNASSIGNED
3142 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3143#endif
5b450407 3144#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3145 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3146#endif
3147}
3148
c227f099 3149static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3150{
3151#ifdef DEBUG_UNASSIGNED
3152 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3153#endif
5b450407 3154#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3155 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3156#endif
33417e70
FB
3157}
3158
d60efc6b 3159static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3160 unassigned_mem_readb,
e18231a3
BS
3161 unassigned_mem_readw,
3162 unassigned_mem_readl,
33417e70
FB
3163};
3164
d60efc6b 3165static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3166 unassigned_mem_writeb,
e18231a3
BS
3167 unassigned_mem_writew,
3168 unassigned_mem_writel,
33417e70
FB
3169};
3170
c227f099 3171static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3172 uint32_t val)
9fa3e853 3173{
3a7d929e 3174 int dirty_flags;
f7c11b53 3175 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3176 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3177#if !defined(CONFIG_USER_ONLY)
3a7d929e 3178 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3179 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3180#endif
3a7d929e 3181 }
5579c7f3 3182 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3183 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3184 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3185 /* we remove the notdirty callback only if the code has been
3186 flushed */
3187 if (dirty_flags == 0xff)
2e70f6ef 3188 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3189}
3190
c227f099 3191static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3192 uint32_t val)
9fa3e853 3193{
3a7d929e 3194 int dirty_flags;
f7c11b53 3195 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3196 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3197#if !defined(CONFIG_USER_ONLY)
3a7d929e 3198 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3199 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3200#endif
3a7d929e 3201 }
5579c7f3 3202 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3203 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3204 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3205 /* we remove the notdirty callback only if the code has been
3206 flushed */
3207 if (dirty_flags == 0xff)
2e70f6ef 3208 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3209}
3210
c227f099 3211static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3212 uint32_t val)
9fa3e853 3213{
3a7d929e 3214 int dirty_flags;
f7c11b53 3215 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3216 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3217#if !defined(CONFIG_USER_ONLY)
3a7d929e 3218 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3219 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3220#endif
3a7d929e 3221 }
5579c7f3 3222 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3223 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3224 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3225 /* we remove the notdirty callback only if the code has been
3226 flushed */
3227 if (dirty_flags == 0xff)
2e70f6ef 3228 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3229}
3230
d60efc6b 3231static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3232 NULL, /* never used */
3233 NULL, /* never used */
3234 NULL, /* never used */
3235};
3236
d60efc6b 3237static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3238 notdirty_mem_writeb,
3239 notdirty_mem_writew,
3240 notdirty_mem_writel,
3241};
3242
0f459d16 3243/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3244static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3245{
3246 CPUState *env = cpu_single_env;
06d55cc1
AL
3247 target_ulong pc, cs_base;
3248 TranslationBlock *tb;
0f459d16 3249 target_ulong vaddr;
a1d1bb31 3250 CPUWatchpoint *wp;
06d55cc1 3251 int cpu_flags;
0f459d16 3252
06d55cc1
AL
3253 if (env->watchpoint_hit) {
3254 /* We re-entered the check after replacing the TB. Now raise
3255 * the debug interrupt so that is will trigger after the
3256 * current instruction. */
3257 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3258 return;
3259 }
2e70f6ef 3260 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3261 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3262 if ((vaddr == (wp->vaddr & len_mask) ||
3263 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3264 wp->flags |= BP_WATCHPOINT_HIT;
3265 if (!env->watchpoint_hit) {
3266 env->watchpoint_hit = wp;
3267 tb = tb_find_pc(env->mem_io_pc);
3268 if (!tb) {
3269 cpu_abort(env, "check_watchpoint: could not find TB for "
3270 "pc=%p", (void *)env->mem_io_pc);
3271 }
618ba8e6 3272 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3273 tb_phys_invalidate(tb, -1);
3274 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3275 env->exception_index = EXCP_DEBUG;
3276 } else {
3277 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3278 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3279 }
3280 cpu_resume_from_signal(env, NULL);
06d55cc1 3281 }
6e140f28
AL
3282 } else {
3283 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3284 }
3285 }
3286}
3287
6658ffb8
PB
3288/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3289 so these check for a hit then pass through to the normal out-of-line
3290 phys routines. */
c227f099 3291static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3292{
b4051334 3293 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3294 return ldub_phys(addr);
3295}
3296
c227f099 3297static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3298{
b4051334 3299 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3300 return lduw_phys(addr);
3301}
3302
c227f099 3303static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3304{
b4051334 3305 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3306 return ldl_phys(addr);
3307}
3308
c227f099 3309static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3310 uint32_t val)
3311{
b4051334 3312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3313 stb_phys(addr, val);
3314}
3315
c227f099 3316static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3317 uint32_t val)
3318{
b4051334 3319 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3320 stw_phys(addr, val);
3321}
3322
c227f099 3323static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3324 uint32_t val)
3325{
b4051334 3326 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3327 stl_phys(addr, val);
3328}
3329
d60efc6b 3330static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3331 watch_mem_readb,
3332 watch_mem_readw,
3333 watch_mem_readl,
3334};
3335
d60efc6b 3336static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3337 watch_mem_writeb,
3338 watch_mem_writew,
3339 watch_mem_writel,
3340};
6658ffb8 3341
f6405247
RH
3342static inline uint32_t subpage_readlen (subpage_t *mmio,
3343 target_phys_addr_t addr,
3344 unsigned int len)
db7b5426 3345{
f6405247 3346 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3347#if defined(DEBUG_SUBPAGE)
3348 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3349 mmio, len, addr, idx);
3350#endif
db7b5426 3351
f6405247
RH
3352 addr += mmio->region_offset[idx];
3353 idx = mmio->sub_io_index[idx];
acbbec5d 3354 return io_mem_read(idx, addr, 1 <<len);
db7b5426
BS
3355}
3356
c227f099 3357static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3358 uint32_t value, unsigned int len)
db7b5426 3359{
f6405247 3360 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3361#if defined(DEBUG_SUBPAGE)
f6405247
RH
3362 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3363 __func__, mmio, len, addr, idx, value);
db7b5426 3364#endif
f6405247
RH
3365
3366 addr += mmio->region_offset[idx];
3367 idx = mmio->sub_io_index[idx];
acbbec5d 3368 io_mem_write(idx, addr, value, 1 << len);
db7b5426
BS
3369}
3370
c227f099 3371static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3372{
db7b5426
BS
3373 return subpage_readlen(opaque, addr, 0);
3374}
3375
c227f099 3376static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3377 uint32_t value)
3378{
db7b5426
BS
3379 subpage_writelen(opaque, addr, value, 0);
3380}
3381
c227f099 3382static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3383{
db7b5426
BS
3384 return subpage_readlen(opaque, addr, 1);
3385}
3386
c227f099 3387static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3388 uint32_t value)
3389{
db7b5426
BS
3390 subpage_writelen(opaque, addr, value, 1);
3391}
3392
c227f099 3393static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3394{
db7b5426
BS
3395 return subpage_readlen(opaque, addr, 2);
3396}
3397
f6405247
RH
3398static void subpage_writel (void *opaque, target_phys_addr_t addr,
3399 uint32_t value)
db7b5426 3400{
db7b5426
BS
3401 subpage_writelen(opaque, addr, value, 2);
3402}
3403
d60efc6b 3404static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3405 &subpage_readb,
3406 &subpage_readw,
3407 &subpage_readl,
3408};
3409
d60efc6b 3410static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3411 &subpage_writeb,
3412 &subpage_writew,
3413 &subpage_writel,
3414};
3415
56384e8b
AF
3416static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3417{
3418 ram_addr_t raddr = addr;
3419 void *ptr = qemu_get_ram_ptr(raddr);
3420 return ldub_p(ptr);
3421}
3422
3423static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3424 uint32_t value)
3425{
3426 ram_addr_t raddr = addr;
3427 void *ptr = qemu_get_ram_ptr(raddr);
3428 stb_p(ptr, value);
3429}
3430
3431static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3432{
3433 ram_addr_t raddr = addr;
3434 void *ptr = qemu_get_ram_ptr(raddr);
3435 return lduw_p(ptr);
3436}
3437
3438static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3439 uint32_t value)
3440{
3441 ram_addr_t raddr = addr;
3442 void *ptr = qemu_get_ram_ptr(raddr);
3443 stw_p(ptr, value);
3444}
3445
3446static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3447{
3448 ram_addr_t raddr = addr;
3449 void *ptr = qemu_get_ram_ptr(raddr);
3450 return ldl_p(ptr);
3451}
3452
3453static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3454 uint32_t value)
3455{
3456 ram_addr_t raddr = addr;
3457 void *ptr = qemu_get_ram_ptr(raddr);
3458 stl_p(ptr, value);
3459}
3460
3461static CPUReadMemoryFunc * const subpage_ram_read[] = {
3462 &subpage_ram_readb,
3463 &subpage_ram_readw,
3464 &subpage_ram_readl,
3465};
3466
3467static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3468 &subpage_ram_writeb,
3469 &subpage_ram_writew,
3470 &subpage_ram_writel,
3471};
3472
c227f099
AL
3473static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3474 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3475{
3476 int idx, eidx;
3477
3478 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3479 return -1;
3480 idx = SUBPAGE_IDX(start);
3481 eidx = SUBPAGE_IDX(end);
3482#if defined(DEBUG_SUBPAGE)
0bf9e31a 3483 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3484 mmio, start, end, idx, eidx, memory);
3485#endif
56384e8b
AF
3486 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3487 memory = IO_MEM_SUBPAGE_RAM;
3488 }
f6405247 3489 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3490 for (; idx <= eidx; idx++) {
f6405247
RH
3491 mmio->sub_io_index[idx] = memory;
3492 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3493 }
3494
3495 return 0;
3496}
3497
f6405247
RH
3498static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3499 ram_addr_t orig_memory,
3500 ram_addr_t region_offset)
db7b5426 3501{
c227f099 3502 subpage_t *mmio;
db7b5426
BS
3503 int subpage_memory;
3504
7267c094 3505 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3506
3507 mmio->base = base;
be675c97 3508 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3509#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3510 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3511 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3512#endif
1eec614b 3513 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3514 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3515
3516 return mmio;
3517}
3518
88715657
AL
3519static int get_free_io_mem_idx(void)
3520{
3521 int i;
3522
3523 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3524 if (!io_mem_used[i]) {
3525 io_mem_used[i] = 1;
3526 return i;
3527 }
c6703b47 3528 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3529 return -1;
3530}
3531
33417e70
FB
3532/* mem_read and mem_write are arrays of functions containing the
3533 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3534 2). Functions can be omitted with a NULL function pointer.
3ee89922 3535 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3536 modified. If it is zero, a new io zone is allocated. The return
3537 value can be used with cpu_register_physical_memory(). (-1) is
3538 returned if error. */
1eed09cb 3539static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3540 CPUReadMemoryFunc * const *mem_read,
3541 CPUWriteMemoryFunc * const *mem_write,
be675c97 3542 void *opaque)
33417e70 3543{
3cab721d
RH
3544 int i;
3545
33417e70 3546 if (io_index <= 0) {
88715657
AL
3547 io_index = get_free_io_mem_idx();
3548 if (io_index == -1)
3549 return io_index;
33417e70 3550 } else {
1eed09cb 3551 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3552 if (io_index >= IO_MEM_NB_ENTRIES)
3553 return -1;
3554 }
b5ff1b31 3555
3cab721d 3556 for (i = 0; i < 3; ++i) {
acbbec5d 3557 _io_mem_read[io_index][i]
3cab721d
RH
3558 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3559 }
3560 for (i = 0; i < 3; ++i) {
acbbec5d 3561 _io_mem_write[io_index][i]
3cab721d
RH
3562 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3563 }
a4193c8a 3564 io_mem_opaque[io_index] = opaque;
f6405247
RH
3565
3566 return (io_index << IO_MEM_SHIFT);
33417e70 3567}
61382a50 3568
d60efc6b
BS
3569int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3570 CPUWriteMemoryFunc * const *mem_write,
be675c97 3571 void *opaque)
1eed09cb 3572{
be675c97 3573 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3574}
3575
88715657
AL
3576void cpu_unregister_io_memory(int io_table_address)
3577{
3578 int i;
3579 int io_index = io_table_address >> IO_MEM_SHIFT;
3580
3581 for (i=0;i < 3; i++) {
acbbec5d
AK
3582 _io_mem_read[io_index][i] = unassigned_mem_read[i];
3583 _io_mem_write[io_index][i] = unassigned_mem_write[i];
88715657
AL
3584 }
3585 io_mem_opaque[io_index] = NULL;
3586 io_mem_used[io_index] = 0;
3587}
3588
e9179ce1
AK
3589static void io_mem_init(void)
3590{
3591 int i;
3592
2507c12a 3593 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
be675c97 3594 unassigned_mem_write, NULL);
2507c12a 3595 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
be675c97 3596 unassigned_mem_write, NULL);
2507c12a 3597 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
be675c97 3598 notdirty_mem_write, NULL);
56384e8b 3599 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
be675c97 3600 subpage_ram_write, NULL);
e9179ce1
AK
3601 for (i=0; i<5; i++)
3602 io_mem_used[i] = 1;
3603
3604 io_mem_watch = cpu_register_io_memory(watch_mem_read,
be675c97 3605 watch_mem_write, NULL);
e9179ce1
AK
3606}
3607
62152b8a
AK
3608static void memory_map_init(void)
3609{
7267c094 3610 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3611 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3612 set_system_memory_map(system_memory);
309cb471 3613
7267c094 3614 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3615 memory_region_init(system_io, "io", 65536);
3616 set_system_io_map(system_io);
62152b8a
AK
3617}
3618
3619MemoryRegion *get_system_memory(void)
3620{
3621 return system_memory;
3622}
3623
309cb471
AK
3624MemoryRegion *get_system_io(void)
3625{
3626 return system_io;
3627}
3628
e2eef170
PB
3629#endif /* !defined(CONFIG_USER_ONLY) */
3630
13eb76e0
FB
3631/* physical memory access (slow version, mainly for debug) */
3632#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3633int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3634 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3635{
3636 int l, flags;
3637 target_ulong page;
53a5960a 3638 void * p;
13eb76e0
FB
3639
3640 while (len > 0) {
3641 page = addr & TARGET_PAGE_MASK;
3642 l = (page + TARGET_PAGE_SIZE) - addr;
3643 if (l > len)
3644 l = len;
3645 flags = page_get_flags(page);
3646 if (!(flags & PAGE_VALID))
a68fe89c 3647 return -1;
13eb76e0
FB
3648 if (is_write) {
3649 if (!(flags & PAGE_WRITE))
a68fe89c 3650 return -1;
579a97f7 3651 /* XXX: this code should not depend on lock_user */
72fb7daa 3652 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3653 return -1;
72fb7daa
AJ
3654 memcpy(p, buf, l);
3655 unlock_user(p, addr, l);
13eb76e0
FB
3656 } else {
3657 if (!(flags & PAGE_READ))
a68fe89c 3658 return -1;
579a97f7 3659 /* XXX: this code should not depend on lock_user */
72fb7daa 3660 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3661 return -1;
72fb7daa 3662 memcpy(buf, p, l);
5b257578 3663 unlock_user(p, addr, 0);
13eb76e0
FB
3664 }
3665 len -= l;
3666 buf += l;
3667 addr += l;
3668 }
a68fe89c 3669 return 0;
13eb76e0 3670}
8df1cd07 3671
13eb76e0 3672#else
c227f099 3673void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3674 int len, int is_write)
3675{
3676 int l, io_index;
3677 uint8_t *ptr;
3678 uint32_t val;
c227f099 3679 target_phys_addr_t page;
8ca5692d 3680 ram_addr_t pd;
f1f6e3b8 3681 PhysPageDesc p;
3b46e624 3682
13eb76e0
FB
3683 while (len > 0) {
3684 page = addr & TARGET_PAGE_MASK;
3685 l = (page + TARGET_PAGE_SIZE) - addr;
3686 if (l > len)
3687 l = len;
92e873b9 3688 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3689 pd = p.phys_offset;
3b46e624 3690
13eb76e0 3691 if (is_write) {
3a7d929e 3692 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
f1f6e3b8 3693 target_phys_addr_t addr1;
13eb76e0 3694 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3695 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3696 /* XXX: could force cpu_single_env to NULL to avoid
3697 potential bugs */
6c2934db 3698 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3699 /* 32 bit write access */
c27004ec 3700 val = ldl_p(buf);
acbbec5d 3701 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3702 l = 4;
6c2934db 3703 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3704 /* 16 bit write access */
c27004ec 3705 val = lduw_p(buf);
acbbec5d 3706 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3707 l = 2;
3708 } else {
1c213d19 3709 /* 8 bit write access */
c27004ec 3710 val = ldub_p(buf);
acbbec5d 3711 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3712 l = 1;
3713 }
3714 } else {
8ca5692d 3715 ram_addr_t addr1;
b448f2f3 3716 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3717 /* RAM case */
5579c7f3 3718 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3719 memcpy(ptr, buf, l);
3a7d929e
FB
3720 if (!cpu_physical_memory_is_dirty(addr1)) {
3721 /* invalidate code */
3722 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3723 /* set dirty bit */
f7c11b53
YT
3724 cpu_physical_memory_set_dirty_flags(
3725 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3726 }
050a0ddf 3727 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3728 }
3729 } else {
5fafdf24 3730 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3731 !(pd & IO_MEM_ROMD)) {
f1f6e3b8 3732 target_phys_addr_t addr1;
13eb76e0
FB
3733 /* I/O case */
3734 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3735 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3736 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3737 /* 32 bit read access */
acbbec5d 3738 val = io_mem_read(io_index, addr1, 4);
c27004ec 3739 stl_p(buf, val);
13eb76e0 3740 l = 4;
6c2934db 3741 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3742 /* 16 bit read access */
acbbec5d 3743 val = io_mem_read(io_index, addr1, 2);
c27004ec 3744 stw_p(buf, val);
13eb76e0
FB
3745 l = 2;
3746 } else {
1c213d19 3747 /* 8 bit read access */
acbbec5d 3748 val = io_mem_read(io_index, addr1, 1);
c27004ec 3749 stb_p(buf, val);
13eb76e0
FB
3750 l = 1;
3751 }
3752 } else {
3753 /* RAM case */
050a0ddf
AP
3754 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3755 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3756 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3757 }
3758 }
3759 len -= l;
3760 buf += l;
3761 addr += l;
3762 }
3763}
8df1cd07 3764
d0ecd2aa 3765/* used for ROM loading : can write in RAM and ROM */
c227f099 3766void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3767 const uint8_t *buf, int len)
3768{
3769 int l;
3770 uint8_t *ptr;
c227f099 3771 target_phys_addr_t page;
d0ecd2aa 3772 unsigned long pd;
f1f6e3b8 3773 PhysPageDesc p;
3b46e624 3774
d0ecd2aa
FB
3775 while (len > 0) {
3776 page = addr & TARGET_PAGE_MASK;
3777 l = (page + TARGET_PAGE_SIZE) - addr;
3778 if (l > len)
3779 l = len;
3780 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3781 pd = p.phys_offset;
3b46e624 3782
d0ecd2aa 3783 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3784 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3785 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3786 /* do nothing */
3787 } else {
3788 unsigned long addr1;
3789 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3790 /* ROM/RAM case */
5579c7f3 3791 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3792 memcpy(ptr, buf, l);
050a0ddf 3793 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3794 }
3795 len -= l;
3796 buf += l;
3797 addr += l;
3798 }
3799}
3800
6d16c2f8
AL
3801typedef struct {
3802 void *buffer;
c227f099
AL
3803 target_phys_addr_t addr;
3804 target_phys_addr_t len;
6d16c2f8
AL
3805} BounceBuffer;
3806
3807static BounceBuffer bounce;
3808
ba223c29
AL
3809typedef struct MapClient {
3810 void *opaque;
3811 void (*callback)(void *opaque);
72cf2d4f 3812 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3813} MapClient;
3814
72cf2d4f
BS
3815static QLIST_HEAD(map_client_list, MapClient) map_client_list
3816 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3817
3818void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3819{
7267c094 3820 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3821
3822 client->opaque = opaque;
3823 client->callback = callback;
72cf2d4f 3824 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3825 return client;
3826}
3827
3828void cpu_unregister_map_client(void *_client)
3829{
3830 MapClient *client = (MapClient *)_client;
3831
72cf2d4f 3832 QLIST_REMOVE(client, link);
7267c094 3833 g_free(client);
ba223c29
AL
3834}
3835
3836static void cpu_notify_map_clients(void)
3837{
3838 MapClient *client;
3839
72cf2d4f
BS
3840 while (!QLIST_EMPTY(&map_client_list)) {
3841 client = QLIST_FIRST(&map_client_list);
ba223c29 3842 client->callback(client->opaque);
34d5e948 3843 cpu_unregister_map_client(client);
ba223c29
AL
3844 }
3845}
3846
6d16c2f8
AL
3847/* Map a physical memory region into a host virtual address.
3848 * May map a subset of the requested range, given by and returned in *plen.
3849 * May return NULL if resources needed to perform the mapping are exhausted.
3850 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3851 * Use cpu_register_map_client() to know when retrying the map operation is
3852 * likely to succeed.
6d16c2f8 3853 */
c227f099
AL
3854void *cpu_physical_memory_map(target_phys_addr_t addr,
3855 target_phys_addr_t *plen,
6d16c2f8
AL
3856 int is_write)
3857{
c227f099 3858 target_phys_addr_t len = *plen;
38bee5dc 3859 target_phys_addr_t todo = 0;
6d16c2f8 3860 int l;
c227f099 3861 target_phys_addr_t page;
6d16c2f8 3862 unsigned long pd;
f1f6e3b8 3863 PhysPageDesc p;
f15fbc4b 3864 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3865 ram_addr_t rlen;
3866 void *ret;
6d16c2f8
AL
3867
3868 while (len > 0) {
3869 page = addr & TARGET_PAGE_MASK;
3870 l = (page + TARGET_PAGE_SIZE) - addr;
3871 if (l > len)
3872 l = len;
3873 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3874 pd = p.phys_offset;
6d16c2f8
AL
3875
3876 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 3877 if (todo || bounce.buffer) {
6d16c2f8
AL
3878 break;
3879 }
3880 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3881 bounce.addr = addr;
3882 bounce.len = l;
3883 if (!is_write) {
54f7b4a3 3884 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3885 }
38bee5dc
SS
3886
3887 *plen = l;
3888 return bounce.buffer;
6d16c2f8 3889 }
8ab934f9
SS
3890 if (!todo) {
3891 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3892 }
6d16c2f8
AL
3893
3894 len -= l;
3895 addr += l;
38bee5dc 3896 todo += l;
6d16c2f8 3897 }
8ab934f9
SS
3898 rlen = todo;
3899 ret = qemu_ram_ptr_length(raddr, &rlen);
3900 *plen = rlen;
3901 return ret;
6d16c2f8
AL
3902}
3903
3904/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3905 * Will also mark the memory as dirty if is_write == 1. access_len gives
3906 * the amount of memory that was actually read or written by the caller.
3907 */
c227f099
AL
3908void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3909 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3910{
3911 if (buffer != bounce.buffer) {
3912 if (is_write) {
e890261f 3913 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3914 while (access_len) {
3915 unsigned l;
3916 l = TARGET_PAGE_SIZE;
3917 if (l > access_len)
3918 l = access_len;
3919 if (!cpu_physical_memory_is_dirty(addr1)) {
3920 /* invalidate code */
3921 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3922 /* set dirty bit */
f7c11b53
YT
3923 cpu_physical_memory_set_dirty_flags(
3924 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3925 }
3926 addr1 += l;
3927 access_len -= l;
3928 }
3929 }
868bb33f 3930 if (xen_enabled()) {
e41d7c69 3931 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3932 }
6d16c2f8
AL
3933 return;
3934 }
3935 if (is_write) {
3936 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3937 }
f8a83245 3938 qemu_vfree(bounce.buffer);
6d16c2f8 3939 bounce.buffer = NULL;
ba223c29 3940 cpu_notify_map_clients();
6d16c2f8 3941}
d0ecd2aa 3942
8df1cd07 3943/* warning: addr must be aligned */
1e78bcc1
AG
3944static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3945 enum device_endian endian)
8df1cd07
FB
3946{
3947 int io_index;
3948 uint8_t *ptr;
3949 uint32_t val;
3950 unsigned long pd;
f1f6e3b8 3951 PhysPageDesc p;
8df1cd07
FB
3952
3953 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3954 pd = p.phys_offset;
3b46e624 3955
5fafdf24 3956 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3957 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3958 /* I/O case */
3959 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3960 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3961 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3962#if defined(TARGET_WORDS_BIGENDIAN)
3963 if (endian == DEVICE_LITTLE_ENDIAN) {
3964 val = bswap32(val);
3965 }
3966#else
3967 if (endian == DEVICE_BIG_ENDIAN) {
3968 val = bswap32(val);
3969 }
3970#endif
8df1cd07
FB
3971 } else {
3972 /* RAM case */
5579c7f3 3973 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3974 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3975 switch (endian) {
3976 case DEVICE_LITTLE_ENDIAN:
3977 val = ldl_le_p(ptr);
3978 break;
3979 case DEVICE_BIG_ENDIAN:
3980 val = ldl_be_p(ptr);
3981 break;
3982 default:
3983 val = ldl_p(ptr);
3984 break;
3985 }
8df1cd07
FB
3986 }
3987 return val;
3988}
3989
1e78bcc1
AG
3990uint32_t ldl_phys(target_phys_addr_t addr)
3991{
3992 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3993}
3994
3995uint32_t ldl_le_phys(target_phys_addr_t addr)
3996{
3997 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3998}
3999
4000uint32_t ldl_be_phys(target_phys_addr_t addr)
4001{
4002 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4003}
4004
84b7b8e7 4005/* warning: addr must be aligned */
1e78bcc1
AG
4006static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4007 enum device_endian endian)
84b7b8e7
FB
4008{
4009 int io_index;
4010 uint8_t *ptr;
4011 uint64_t val;
4012 unsigned long pd;
f1f6e3b8 4013 PhysPageDesc p;
84b7b8e7
FB
4014
4015 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4016 pd = p.phys_offset;
3b46e624 4017
2a4188a3
FB
4018 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4019 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4020 /* I/O case */
4021 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4022 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4023
4024 /* XXX This is broken when device endian != cpu endian.
4025 Fix and add "endian" variable check */
84b7b8e7 4026#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4027 val = io_mem_read(io_index, addr, 4) << 32;
4028 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 4029#else
acbbec5d
AK
4030 val = io_mem_read(io_index, addr, 4);
4031 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
4032#endif
4033 } else {
4034 /* RAM case */
5579c7f3 4035 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4036 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4037 switch (endian) {
4038 case DEVICE_LITTLE_ENDIAN:
4039 val = ldq_le_p(ptr);
4040 break;
4041 case DEVICE_BIG_ENDIAN:
4042 val = ldq_be_p(ptr);
4043 break;
4044 default:
4045 val = ldq_p(ptr);
4046 break;
4047 }
84b7b8e7
FB
4048 }
4049 return val;
4050}
4051
1e78bcc1
AG
4052uint64_t ldq_phys(target_phys_addr_t addr)
4053{
4054 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4055}
4056
4057uint64_t ldq_le_phys(target_phys_addr_t addr)
4058{
4059 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4060}
4061
4062uint64_t ldq_be_phys(target_phys_addr_t addr)
4063{
4064 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4065}
4066
aab33094 4067/* XXX: optimize */
c227f099 4068uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4069{
4070 uint8_t val;
4071 cpu_physical_memory_read(addr, &val, 1);
4072 return val;
4073}
4074
733f0b02 4075/* warning: addr must be aligned */
1e78bcc1
AG
4076static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4077 enum device_endian endian)
aab33094 4078{
733f0b02
MT
4079 int io_index;
4080 uint8_t *ptr;
4081 uint64_t val;
4082 unsigned long pd;
f1f6e3b8 4083 PhysPageDesc p;
733f0b02
MT
4084
4085 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4086 pd = p.phys_offset;
733f0b02
MT
4087
4088 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4089 !(pd & IO_MEM_ROMD)) {
4090 /* I/O case */
4091 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4092 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4093 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
4094#if defined(TARGET_WORDS_BIGENDIAN)
4095 if (endian == DEVICE_LITTLE_ENDIAN) {
4096 val = bswap16(val);
4097 }
4098#else
4099 if (endian == DEVICE_BIG_ENDIAN) {
4100 val = bswap16(val);
4101 }
4102#endif
733f0b02
MT
4103 } else {
4104 /* RAM case */
4105 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4106 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4107 switch (endian) {
4108 case DEVICE_LITTLE_ENDIAN:
4109 val = lduw_le_p(ptr);
4110 break;
4111 case DEVICE_BIG_ENDIAN:
4112 val = lduw_be_p(ptr);
4113 break;
4114 default:
4115 val = lduw_p(ptr);
4116 break;
4117 }
733f0b02
MT
4118 }
4119 return val;
aab33094
FB
4120}
4121
1e78bcc1
AG
4122uint32_t lduw_phys(target_phys_addr_t addr)
4123{
4124 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4125}
4126
4127uint32_t lduw_le_phys(target_phys_addr_t addr)
4128{
4129 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4130}
4131
4132uint32_t lduw_be_phys(target_phys_addr_t addr)
4133{
4134 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4135}
4136
8df1cd07
FB
4137/* warning: addr must be aligned. The ram page is not masked as dirty
4138 and the code inside is not invalidated. It is useful if the dirty
4139 bits are used to track modified PTEs */
c227f099 4140void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4141{
4142 int io_index;
4143 uint8_t *ptr;
4144 unsigned long pd;
f1f6e3b8 4145 PhysPageDesc p;
8df1cd07
FB
4146
4147 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4148 pd = p.phys_offset;
3b46e624 4149
3a7d929e 4150 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4151 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4152 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4153 io_mem_write(io_index, addr, val, 4);
8df1cd07 4154 } else {
74576198 4155 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4156 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4157 stl_p(ptr, val);
74576198
AL
4158
4159 if (unlikely(in_migration)) {
4160 if (!cpu_physical_memory_is_dirty(addr1)) {
4161 /* invalidate code */
4162 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4163 /* set dirty bit */
f7c11b53
YT
4164 cpu_physical_memory_set_dirty_flags(
4165 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4166 }
4167 }
8df1cd07
FB
4168 }
4169}
4170
c227f099 4171void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4172{
4173 int io_index;
4174 uint8_t *ptr;
4175 unsigned long pd;
f1f6e3b8 4176 PhysPageDesc p;
bc98a7ef
JM
4177
4178 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4179 pd = p.phys_offset;
3b46e624 4180
bc98a7ef
JM
4181 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4182 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4183 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4184#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4185 io_mem_write(io_index, addr, val >> 32, 4);
4186 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4187#else
acbbec5d
AK
4188 io_mem_write(io_index, addr, (uint32_t)val, 4);
4189 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4190#endif
4191 } else {
5579c7f3 4192 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4193 (addr & ~TARGET_PAGE_MASK);
4194 stq_p(ptr, val);
4195 }
4196}
4197
8df1cd07 4198/* warning: addr must be aligned */
1e78bcc1
AG
4199static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4200 enum device_endian endian)
8df1cd07
FB
4201{
4202 int io_index;
4203 uint8_t *ptr;
4204 unsigned long pd;
f1f6e3b8 4205 PhysPageDesc p;
8df1cd07
FB
4206
4207 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4208 pd = p.phys_offset;
3b46e624 4209
3a7d929e 4210 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4211 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4212 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4213#if defined(TARGET_WORDS_BIGENDIAN)
4214 if (endian == DEVICE_LITTLE_ENDIAN) {
4215 val = bswap32(val);
4216 }
4217#else
4218 if (endian == DEVICE_BIG_ENDIAN) {
4219 val = bswap32(val);
4220 }
4221#endif
acbbec5d 4222 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4223 } else {
4224 unsigned long addr1;
4225 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4226 /* RAM case */
5579c7f3 4227 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4228 switch (endian) {
4229 case DEVICE_LITTLE_ENDIAN:
4230 stl_le_p(ptr, val);
4231 break;
4232 case DEVICE_BIG_ENDIAN:
4233 stl_be_p(ptr, val);
4234 break;
4235 default:
4236 stl_p(ptr, val);
4237 break;
4238 }
3a7d929e
FB
4239 if (!cpu_physical_memory_is_dirty(addr1)) {
4240 /* invalidate code */
4241 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4242 /* set dirty bit */
f7c11b53
YT
4243 cpu_physical_memory_set_dirty_flags(addr1,
4244 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4245 }
8df1cd07
FB
4246 }
4247}
4248
1e78bcc1
AG
4249void stl_phys(target_phys_addr_t addr, uint32_t val)
4250{
4251 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4252}
4253
4254void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4255{
4256 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4257}
4258
4259void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4260{
4261 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4262}
4263
aab33094 4264/* XXX: optimize */
c227f099 4265void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4266{
4267 uint8_t v = val;
4268 cpu_physical_memory_write(addr, &v, 1);
4269}
4270
733f0b02 4271/* warning: addr must be aligned */
1e78bcc1
AG
4272static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4273 enum device_endian endian)
aab33094 4274{
733f0b02
MT
4275 int io_index;
4276 uint8_t *ptr;
4277 unsigned long pd;
f1f6e3b8 4278 PhysPageDesc p;
733f0b02
MT
4279
4280 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4281 pd = p.phys_offset;
733f0b02
MT
4282
4283 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4284 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4285 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4286#if defined(TARGET_WORDS_BIGENDIAN)
4287 if (endian == DEVICE_LITTLE_ENDIAN) {
4288 val = bswap16(val);
4289 }
4290#else
4291 if (endian == DEVICE_BIG_ENDIAN) {
4292 val = bswap16(val);
4293 }
4294#endif
acbbec5d 4295 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4296 } else {
4297 unsigned long addr1;
4298 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4299 /* RAM case */
4300 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4301 switch (endian) {
4302 case DEVICE_LITTLE_ENDIAN:
4303 stw_le_p(ptr, val);
4304 break;
4305 case DEVICE_BIG_ENDIAN:
4306 stw_be_p(ptr, val);
4307 break;
4308 default:
4309 stw_p(ptr, val);
4310 break;
4311 }
733f0b02
MT
4312 if (!cpu_physical_memory_is_dirty(addr1)) {
4313 /* invalidate code */
4314 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4315 /* set dirty bit */
4316 cpu_physical_memory_set_dirty_flags(addr1,
4317 (0xff & ~CODE_DIRTY_FLAG));
4318 }
4319 }
aab33094
FB
4320}
4321
1e78bcc1
AG
4322void stw_phys(target_phys_addr_t addr, uint32_t val)
4323{
4324 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4325}
4326
4327void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4328{
4329 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4330}
4331
4332void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4333{
4334 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4335}
4336
aab33094 4337/* XXX: optimize */
c227f099 4338void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4339{
4340 val = tswap64(val);
71d2b725 4341 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4342}
4343
1e78bcc1
AG
4344void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4345{
4346 val = cpu_to_le64(val);
4347 cpu_physical_memory_write(addr, &val, 8);
4348}
4349
4350void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4351{
4352 val = cpu_to_be64(val);
4353 cpu_physical_memory_write(addr, &val, 8);
4354}
4355
5e2972fd 4356/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4357int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4358 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4359{
4360 int l;
c227f099 4361 target_phys_addr_t phys_addr;
9b3c35e0 4362 target_ulong page;
13eb76e0
FB
4363
4364 while (len > 0) {
4365 page = addr & TARGET_PAGE_MASK;
4366 phys_addr = cpu_get_phys_page_debug(env, page);
4367 /* if no physical page mapped, return an error */
4368 if (phys_addr == -1)
4369 return -1;
4370 l = (page + TARGET_PAGE_SIZE) - addr;
4371 if (l > len)
4372 l = len;
5e2972fd 4373 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4374 if (is_write)
4375 cpu_physical_memory_write_rom(phys_addr, buf, l);
4376 else
5e2972fd 4377 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4378 len -= l;
4379 buf += l;
4380 addr += l;
4381 }
4382 return 0;
4383}
a68fe89c 4384#endif
13eb76e0 4385
2e70f6ef
PB
4386/* in deterministic execution mode, instructions doing device I/Os
4387 must be at the end of the TB */
4388void cpu_io_recompile(CPUState *env, void *retaddr)
4389{
4390 TranslationBlock *tb;
4391 uint32_t n, cflags;
4392 target_ulong pc, cs_base;
4393 uint64_t flags;
4394
4395 tb = tb_find_pc((unsigned long)retaddr);
4396 if (!tb) {
4397 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4398 retaddr);
4399 }
4400 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4401 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4402 /* Calculate how many instructions had been executed before the fault
bf20dc07 4403 occurred. */
2e70f6ef
PB
4404 n = n - env->icount_decr.u16.low;
4405 /* Generate a new TB ending on the I/O insn. */
4406 n++;
4407 /* On MIPS and SH, delay slot instructions can only be restarted if
4408 they were already the first instruction in the TB. If this is not
bf20dc07 4409 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4410 branch. */
4411#if defined(TARGET_MIPS)
4412 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4413 env->active_tc.PC -= 4;
4414 env->icount_decr.u16.low++;
4415 env->hflags &= ~MIPS_HFLAG_BMASK;
4416 }
4417#elif defined(TARGET_SH4)
4418 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4419 && n > 1) {
4420 env->pc -= 2;
4421 env->icount_decr.u16.low++;
4422 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4423 }
4424#endif
4425 /* This should never happen. */
4426 if (n > CF_COUNT_MASK)
4427 cpu_abort(env, "TB too big during recompile");
4428
4429 cflags = n | CF_LAST_IO;
4430 pc = tb->pc;
4431 cs_base = tb->cs_base;
4432 flags = tb->flags;
4433 tb_phys_invalidate(tb, -1);
4434 /* FIXME: In theory this could raise an exception. In practice
4435 we have already translated the block once so it's probably ok. */
4436 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4437 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4438 the first in the TB) then we end up generating a whole new TB and
4439 repeating the fault, which is horribly inefficient.
4440 Better would be to execute just this insn uncached, or generate a
4441 second new TB. */
4442 cpu_resume_from_signal(env, NULL);
4443}
4444
b3755a91
PB
4445#if !defined(CONFIG_USER_ONLY)
4446
055403b2 4447void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4448{
4449 int i, target_code_size, max_target_code_size;
4450 int direct_jmp_count, direct_jmp2_count, cross_page;
4451 TranslationBlock *tb;
3b46e624 4452
e3db7226
FB
4453 target_code_size = 0;
4454 max_target_code_size = 0;
4455 cross_page = 0;
4456 direct_jmp_count = 0;
4457 direct_jmp2_count = 0;
4458 for(i = 0; i < nb_tbs; i++) {
4459 tb = &tbs[i];
4460 target_code_size += tb->size;
4461 if (tb->size > max_target_code_size)
4462 max_target_code_size = tb->size;
4463 if (tb->page_addr[1] != -1)
4464 cross_page++;
4465 if (tb->tb_next_offset[0] != 0xffff) {
4466 direct_jmp_count++;
4467 if (tb->tb_next_offset[1] != 0xffff) {
4468 direct_jmp2_count++;
4469 }
4470 }
4471 }
4472 /* XXX: avoid using doubles ? */
57fec1fe 4473 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4474 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4475 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4476 cpu_fprintf(f, "TB count %d/%d\n",
4477 nb_tbs, code_gen_max_blocks);
5fafdf24 4478 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4479 nb_tbs ? target_code_size / nb_tbs : 0,
4480 max_target_code_size);
055403b2 4481 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4482 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4483 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4484 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4485 cross_page,
e3db7226
FB
4486 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4487 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4488 direct_jmp_count,
e3db7226
FB
4489 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4490 direct_jmp2_count,
4491 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4492 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4493 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4494 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4495 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4496 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4497}
4498
61382a50 4499#define MMUSUFFIX _cmmu
3917149d 4500#undef GETPC
61382a50
FB
4501#define GETPC() NULL
4502#define env cpu_single_env
b769d8fe 4503#define SOFTMMU_CODE_ACCESS
61382a50
FB
4504
4505#define SHIFT 0
4506#include "softmmu_template.h"
4507
4508#define SHIFT 1
4509#include "softmmu_template.h"
4510
4511#define SHIFT 2
4512#include "softmmu_template.h"
4513
4514#define SHIFT 3
4515#include "softmmu_template.h"
4516
4517#undef env
4518
4519#endif