/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

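/* Mark the given host memory range readable, writable and executable.
   Used below for the translated-code buffer and the TCG prologue. */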
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

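/* Determine the host and target page sizes; on BSD user-mode hosts also
   mark the address ranges already mapped by the host as PAGE_RESERVED. */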
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

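/* Walk the multi-level l1_map radix tree and return the PageDesc for the
   given target page index, allocating missing intermediate levels when
   'alloc' is non-zero (NULL is returned instead when 'alloc' is zero). */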
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
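/* Same walk as page_find_alloc(), but over l1_phys_map: return the
   PhysPageDesc for a guest physical page index, optionally allocating
   intermediate levels and initializing new leaves to IO_MEM_UNASSIGNED. */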
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

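/* Allocate the buffer that will hold the translated code, either the static
   buffer above or an mmap'ed region placed so that the host can reach the
   prologue with direct calls and branches. */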
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

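/* Add a new CPU to the global CPU list, assign it the next cpu_index and
   register its state for save/restore. */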
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

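/* Remove a TB from the physical hash table, from the per-page TB lists,
   from every CPU's tb_jmp_cache and from the jump chains, so that it can
   no longer be found or entered. */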
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

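/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */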
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

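/* Build the bitmap of bytes in this page that are covered by translated
   code; tb_invalidate_phys_page_fast() uses it to skip writes that cannot
   touch any TB. */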
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

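/* Translate a block starting at 'pc' and link it into the physical page
   tables; if no TB slot or code buffer space is left, flush everything
   and retry. */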
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

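/* Unchain the TB the CPU is currently executing (if any) so that execution
   returns to the main loop at the next TB boundary. */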
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp, target_phys_addr_t addr)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, addr | i << TARGET_PAGE_BITS,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset, false);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

f193c797
FB
1799static int cmp1(const char *s1, int n, const char *s2)
1800{
1801 if (strlen(s2) != n)
1802 return 0;
1803 return memcmp(s1, s2, n) == 0;
1804}
3b46e624 1805
f193c797
FB
1806/* takes a comma-separated list of log masks. Returns 0 on error. */
1807int cpu_str_to_log_mask(const char *str)
1808{
c7cd6a37 1809 const CPULogItem *item;
f193c797
FB
1810 int mask;
1811 const char *p, *p1;
1812
1813 p = str;
1814 mask = 0;
1815 for(;;) {
1816 p1 = strchr(p, ',');
1817 if (!p1)
1818 p1 = p + strlen(p);
9742bf26
YT
1819 if(cmp1(p,p1-p,"all")) {
1820 for(item = cpu_log_items; item->mask != 0; item++) {
1821 mask |= item->mask;
1822 }
1823 } else {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 if (cmp1(p, p1 - p, item->name))
1826 goto found;
1827 }
1828 return 0;
f193c797 1829 }
f193c797
FB
1830 found:
1831 mask |= item->mask;
1832 if (*p1 != ',')
1833 break;
1834 p = p1 + 1;
1835 }
1836 return mask;
1837}
ea041c0e 1838
7501267e
FB
1839void cpu_abort(CPUState *env, const char *fmt, ...)
1840{
1841 va_list ap;
493ae1f0 1842 va_list ap2;
7501267e
FB
1843
1844 va_start(ap, fmt);
493ae1f0 1845 va_copy(ap2, ap);
7501267e
FB
1846 fprintf(stderr, "qemu: fatal: ");
1847 vfprintf(stderr, fmt, ap);
1848 fprintf(stderr, "\n");
1849#ifdef TARGET_I386
7fe48483
FB
1850 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1851#else
1852 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1853#endif
93fcfe39
AL
1854 if (qemu_log_enabled()) {
1855 qemu_log("qemu: fatal: ");
1856 qemu_log_vprintf(fmt, ap2);
1857 qemu_log("\n");
f9373291 1858#ifdef TARGET_I386
93fcfe39 1859 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1860#else
93fcfe39 1861 log_cpu_state(env, 0);
f9373291 1862#endif
31b1a7b4 1863 qemu_log_flush();
93fcfe39 1864 qemu_log_close();
924edcae 1865 }
493ae1f0 1866 va_end(ap2);
f9373291 1867 va_end(ap);
fd052bf6
RV
1868#if defined(CONFIG_USER_ONLY)
1869 {
1870 struct sigaction act;
1871 sigfillset(&act.sa_mask);
1872 act.sa_handler = SIG_DFL;
1873 sigaction(SIGABRT, &act, NULL);
1874 }
1875#endif
7501267e
FB
1876 abort();
1877}
1878
c5be9f08
TS
1879CPUState *cpu_copy(CPUState *env)
1880{
01ba9816 1881 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1882 CPUState *next_cpu = new_env->next_cpu;
1883 int cpu_index = new_env->cpu_index;
5a38f081
AL
1884#if defined(TARGET_HAS_ICE)
1885 CPUBreakpoint *bp;
1886 CPUWatchpoint *wp;
1887#endif
1888
c5be9f08 1889 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1890
1891 /* Preserve chaining and index. */
c5be9f08
TS
1892 new_env->next_cpu = next_cpu;
1893 new_env->cpu_index = cpu_index;
5a38f081
AL
1894
1895 /* Clone all break/watchpoints.
1896 Note: Once we support ptrace with hw-debug register access, make sure
1897 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1898 QTAILQ_INIT(&env->breakpoints);
1899 QTAILQ_INIT(&env->watchpoints);
5a38f081 1900#if defined(TARGET_HAS_ICE)
72cf2d4f 1901 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1902 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1903 }
72cf2d4f 1904 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1905 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1906 wp->flags, NULL);
1907 }
1908#endif
1909
c5be9f08
TS
1910 return new_env;
1911}
1912
0124311e
FB
1913#if !defined(CONFIG_USER_ONLY)
1914
5c751e99
EI
1915static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1916{
1917 unsigned int i;
1918
1919 /* Discard jump cache entries for any tb which might potentially
1920 overlap the flushed page. */
1921 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1922 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1923 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1924
1925 i = tb_jmp_cache_hash_page(addr);
1926 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1927 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1928}
1929
08738984
IK
1930static CPUTLBEntry s_cputlb_empty_entry = {
1931 .addr_read = -1,
1932 .addr_write = -1,
1933 .addr_code = -1,
1934 .addend = -1,
1935};
1936
ee8b7021
FB
1937/* NOTE: if flush_global is true, also flush global entries (not
1938 implemented yet) */
1939void tlb_flush(CPUState *env, int flush_global)
33417e70 1940{
33417e70 1941 int i;
0124311e 1942
9fa3e853
FB
1943#if defined(DEBUG_TLB)
1944 printf("tlb_flush:\n");
1945#endif
0124311e
FB
1946 /* must reset current TB so that interrupts cannot modify the
1947 links while we are modifying them */
1948 env->current_tb = NULL;
1949
33417e70 1950 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1951 int mmu_idx;
1952 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1953 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1954 }
33417e70 1955 }
9fa3e853 1956
8a40a180 1957 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1958
d4c430a8
PB
1959 env->tlb_flush_addr = -1;
1960 env->tlb_flush_mask = 0;
e3db7226 1961 tlb_flush_count++;
33417e70
FB
1962}
1963
274da6b2 1964static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1965{
5fafdf24 1966 if (addr == (tlb_entry->addr_read &
84b7b8e7 1967 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1968 addr == (tlb_entry->addr_write &
84b7b8e7 1969 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1970 addr == (tlb_entry->addr_code &
84b7b8e7 1971 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1972 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1973 }
61382a50
FB
1974}
1975
2e12669a 1976void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1977{
8a40a180 1978 int i;
cfde4bd9 1979 int mmu_idx;
0124311e 1980
9fa3e853 1981#if defined(DEBUG_TLB)
108c49b8 1982 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1983#endif
d4c430a8
PB
1984 /* Check if we need to flush due to large pages. */
1985 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1986#if defined(DEBUG_TLB)
1987 printf("tlb_flush_page: forced full flush ("
1988 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1989 env->tlb_flush_addr, env->tlb_flush_mask);
1990#endif
1991 tlb_flush(env, 1);
1992 return;
1993 }
0124311e
FB
1994 /* must reset current TB so that interrupts cannot modify the
1995 links while we are modifying them */
1996 env->current_tb = NULL;
61382a50
FB
1997
1998 addr &= TARGET_PAGE_MASK;
1999 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2000 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2001 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2002
5c751e99 2003 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2004}
2005
9fa3e853
FB
2006/* update the TLBs so that writes to code in the virtual page 'addr'
2007 can be detected */
c227f099 2008static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2009{
5fafdf24 2010 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2011 ram_addr + TARGET_PAGE_SIZE,
2012 CODE_DIRTY_FLAG);
9fa3e853
FB
2013}
2014
9fa3e853 2015/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2016 tested for self modifying code */
c227f099 2017static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2018 target_ulong vaddr)
9fa3e853 2019{
f7c11b53 2020 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2021}
2022
5fafdf24 2023static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2024 unsigned long start, unsigned long length)
2025{
2026 unsigned long addr;
84b7b8e7
FB
2027 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2028 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2029 if ((addr - start) < length) {
0f459d16 2030 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2031 }
2032 }
2033}
2034
5579c7f3 2035/* Note: start and end must be within the same ram block. */
c227f099 2036void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2037 int dirty_flags)
1ccde1cb
FB
2038{
2039 CPUState *env;
4f2ac237 2040 unsigned long length, start1;
f7c11b53 2041 int i;
1ccde1cb
FB
2042
2043 start &= TARGET_PAGE_MASK;
2044 end = TARGET_PAGE_ALIGN(end);
2045
2046 length = end - start;
2047 if (length == 0)
2048 return;
f7c11b53 2049 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2050
1ccde1cb
FB
2051 /* we modify the TLB cache so that the dirty bit will be set again
2052 when accessing the range */
b2e0a138 2053 start1 = (unsigned long)qemu_safe_ram_ptr(start);
5579c7f3
PB
2054 /* Check that we don't span multiple blocks - this breaks the
2055 address comparisons below. */
b2e0a138 2056 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2057 != (end - 1) - start) {
2058 abort();
2059 }
2060
6a00d601 2061 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2062 int mmu_idx;
2063 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2064 for(i = 0; i < CPU_TLB_SIZE; i++)
2065 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2066 start1, length);
2067 }
6a00d601 2068 }
1ccde1cb
FB
2069}
2070
74576198
AL
2071int cpu_physical_memory_set_dirty_tracking(int enable)
2072{
f6f3fbca 2073 int ret = 0;
74576198 2074 in_migration = enable;
f6f3fbca
MT
2075 ret = cpu_notify_migration_log(!!enable);
2076 return ret;
74576198
AL
2077}
2078
2079int cpu_physical_memory_get_dirty_tracking(void)
2080{
2081 return in_migration;
2082}
2083
c227f099
AL
2084int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2085 target_phys_addr_t end_addr)
2bec46dc 2086{
7b8f3b78 2087 int ret;
151f7749 2088
f6f3fbca 2089 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2090 return ret;
2bec46dc
AL
2091}
2092
e5896b12
AP
2093int cpu_physical_log_start(target_phys_addr_t start_addr,
2094 ram_addr_t size)
2095{
2096 CPUPhysMemoryClient *client;
2097 QLIST_FOREACH(client, &memory_client_list, list) {
2098 if (client->log_start) {
2099 int r = client->log_start(client, start_addr, size);
2100 if (r < 0) {
2101 return r;
2102 }
2103 }
2104 }
2105 return 0;
2106}
2107
2108int cpu_physical_log_stop(target_phys_addr_t start_addr,
2109 ram_addr_t size)
2110{
2111 CPUPhysMemoryClient *client;
2112 QLIST_FOREACH(client, &memory_client_list, list) {
2113 if (client->log_stop) {
2114 int r = client->log_stop(client, start_addr, size);
2115 if (r < 0) {
2116 return r;
2117 }
2118 }
2119 }
2120 return 0;
2121}
2122
3a7d929e
FB
2123static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2124{
c227f099 2125 ram_addr_t ram_addr;
5579c7f3 2126 void *p;
3a7d929e 2127
84b7b8e7 2128 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2129 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2130 + tlb_entry->addend);
e890261f 2131 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2132 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2133 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2134 }
2135 }
2136}
2137
2138/* update the TLB according to the current state of the dirty bits */
2139void cpu_tlb_update_dirty(CPUState *env)
2140{
2141 int i;
cfde4bd9
IY
2142 int mmu_idx;
2143 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2144 for(i = 0; i < CPU_TLB_SIZE; i++)
2145 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2146 }
3a7d929e
FB
2147}
2148
0f459d16 2149static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2150{
0f459d16
PB
2151 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2152 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2153}
2154
0f459d16
PB
2155/* update the TLB corresponding to virtual page vaddr
2156 so that it is no longer dirty */
2157static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2158{
1ccde1cb 2159 int i;
cfde4bd9 2160 int mmu_idx;
1ccde1cb 2161
0f459d16 2162 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2163 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2164 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2165 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2166}
2167
d4c430a8
PB
2168/* Our TLB does not support large pages, so remember the area covered by
2169 large pages and trigger a full TLB flush if these are invalidated. */
2170static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2171 target_ulong size)
2172{
2173 target_ulong mask = ~(size - 1);
2174
2175 if (env->tlb_flush_addr == (target_ulong)-1) {
2176 env->tlb_flush_addr = vaddr & mask;
2177 env->tlb_flush_mask = mask;
2178 return;
2179 }
2180 /* Extend the existing region to include the new page.
2181 This is a compromise between unnecessary flushes and the cost
2182 of maintaining a full variable size TLB. */
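    /* Example: if the tracked region is a 64KB page at 0x00400000
       (mask 0xffff0000) and a new 64KB page is added at 0x00480000, the
       loop below widens the mask to 0xfff00000, so the tracked region
       becomes the 1MB block at 0x00400000 that covers both pages. */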
2183 mask &= env->tlb_flush_mask;
2184 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2185 mask <<= 1;
2186 }
2187 env->tlb_flush_addr &= mask;
2188 env->tlb_flush_mask = mask;
2189}
2190
2191/* Add a new TLB entry. At most one entry for a given virtual address
2192 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2193 supplied size is only used by tlb_flush_page. */
2194void tlb_set_page(CPUState *env, target_ulong vaddr,
2195 target_phys_addr_t paddr, int prot,
2196 int mmu_idx, target_ulong size)
9fa3e853 2197{
92e873b9 2198 PhysPageDesc *p;
4f2ac237 2199 unsigned long pd;
9fa3e853 2200 unsigned int index;
4f2ac237 2201 target_ulong address;
0f459d16 2202 target_ulong code_address;
355b1943 2203 unsigned long addend;
84b7b8e7 2204 CPUTLBEntry *te;
a1d1bb31 2205 CPUWatchpoint *wp;
c227f099 2206 target_phys_addr_t iotlb;
9fa3e853 2207
d4c430a8
PB
2208 assert(size >= TARGET_PAGE_SIZE);
2209 if (size != TARGET_PAGE_SIZE) {
2210 tlb_add_large_page(env, vaddr, size);
2211 }
92e873b9 2212 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2213 if (!p) {
2214 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2215 } else {
2216 pd = p->phys_offset;
9fa3e853
FB
2217 }
2218#if defined(DEBUG_TLB)
7fd3f494
SW
2219 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2220 " prot=%x idx=%d pd=0x%08lx\n",
2221 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2222#endif
2223
0f459d16
PB
2224 address = vaddr;
2225 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2226 /* IO memory case (romd handled later) */
2227 address |= TLB_MMIO;
2228 }
5579c7f3 2229 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2230 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2231 /* Normal RAM. */
2232 iotlb = pd & TARGET_PAGE_MASK;
2233 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2234 iotlb |= IO_MEM_NOTDIRTY;
2235 else
2236 iotlb |= IO_MEM_ROM;
2237 } else {
ccbb4d44 2238 /* IO handlers are currently passed a physical address.
0f459d16
PB
2239 It would be nice to pass an offset from the base address
2240 of that region. This would avoid having to special case RAM,
2241 and avoid full address decoding in every device.
2242 We can't use the high bits of pd for this because
2243 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2244 iotlb = (pd & ~TARGET_PAGE_MASK);
2245 if (p) {
8da3ff18
PB
2246 iotlb += p->region_offset;
2247 } else {
2248 iotlb += paddr;
2249 }
0f459d16
PB
2250 }
2251
2252 code_address = address;
2253 /* Make accesses to pages with watchpoints go via the
2254 watchpoint trap routines. */
72cf2d4f 2255 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2256 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2257 /* Avoid trapping reads of pages with a write breakpoint. */
2258 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2259 iotlb = io_mem_watch + paddr;
2260 address |= TLB_MMIO;
2261 break;
2262 }
6658ffb8 2263 }
0f459d16 2264 }
d79acba4 2265
0f459d16
PB
2266 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2267 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2268 te = &env->tlb_table[mmu_idx][index];
2269 te->addend = addend - vaddr;
2270 if (prot & PAGE_READ) {
2271 te->addr_read = address;
2272 } else {
2273 te->addr_read = -1;
2274 }
5c751e99 2275
0f459d16
PB
2276 if (prot & PAGE_EXEC) {
2277 te->addr_code = code_address;
2278 } else {
2279 te->addr_code = -1;
2280 }
2281 if (prot & PAGE_WRITE) {
2282 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2283 (pd & IO_MEM_ROMD)) {
2284 /* Write access calls the I/O callback. */
2285 te->addr_write = address | TLB_MMIO;
2286 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2287 !cpu_physical_memory_is_dirty(pd)) {
2288 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2289 } else {
0f459d16 2290 te->addr_write = address;
9fa3e853 2291 }
0f459d16
PB
2292 } else {
2293 te->addr_write = -1;
9fa3e853 2294 }
9fa3e853
FB
2295}
2296
0124311e
FB
2297#else
2298
ee8b7021 2299void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2300{
2301}
2302
2e12669a 2303void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2304{
2305}
2306
edf8e2af
MW
2307/*
2308 * Walks guest process memory "regions" one by one
2309 * and calls callback function 'fn' for each region.
2310 */
5cd2c5b6
RH
2311
2312struct walk_memory_regions_data
2313{
2314 walk_memory_regions_fn fn;
2315 void *priv;
2316 unsigned long start;
2317 int prot;
2318};
2319
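/* Report the region accumulated so far ([data->start, end) with protection
   data->prot) to the callback, then start a new region at 'end' with
   'new_prot', or no region at all when 'new_prot' is zero. */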
2320static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2321 abi_ulong end, int new_prot)
5cd2c5b6
RH
2322{
2323 if (data->start != -1ul) {
2324 int rc = data->fn(data->priv, data->start, end, data->prot);
2325 if (rc != 0) {
2326 return rc;
2327 }
2328 }
2329
2330 data->start = (new_prot ? end : -1ul);
2331 data->prot = new_prot;
2332
2333 return 0;
2334}
2335
2336static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2337 abi_ulong base, int level, void **lp)
5cd2c5b6 2338{
b480d9b7 2339 abi_ulong pa;
5cd2c5b6
RH
2340 int i, rc;
2341
2342 if (*lp == NULL) {
2343 return walk_memory_regions_end(data, base, 0);
2344 }
2345
2346 if (level == 0) {
2347 PageDesc *pd = *lp;
7296abac 2348 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2349 int prot = pd[i].flags;
2350
2351 pa = base | (i << TARGET_PAGE_BITS);
2352 if (prot != data->prot) {
2353 rc = walk_memory_regions_end(data, pa, prot);
2354 if (rc != 0) {
2355 return rc;
9fa3e853 2356 }
9fa3e853 2357 }
5cd2c5b6
RH
2358 }
2359 } else {
2360 void **pp = *lp;
7296abac 2361 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2362 pa = base | ((abi_ulong)i <<
2363 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2364 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2365 if (rc != 0) {
2366 return rc;
2367 }
2368 }
2369 }
2370
2371 return 0;
2372}
2373
2374int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2375{
2376 struct walk_memory_regions_data data;
2377 unsigned long i;
2378
2379 data.fn = fn;
2380 data.priv = priv;
2381 data.start = -1ul;
2382 data.prot = 0;
2383
2384 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2385 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2386 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2387 if (rc != 0) {
2388 return rc;
9fa3e853 2389 }
33417e70 2390 }
5cd2c5b6
RH
2391
2392 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2393}
2394
b480d9b7
PB
2395static int dump_region(void *priv, abi_ulong start,
2396 abi_ulong end, unsigned long prot)
edf8e2af
MW
2397{
2398 FILE *f = (FILE *)priv;
2399
b480d9b7
PB
2400 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2401 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2402 start, end, end - start,
2403 ((prot & PAGE_READ) ? 'r' : '-'),
2404 ((prot & PAGE_WRITE) ? 'w' : '-'),
2405 ((prot & PAGE_EXEC) ? 'x' : '-'));
2406
2407 return (0);
2408}
2409
2410/* dump memory mappings */
2411void page_dump(FILE *f)
2412{
2413 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2414 "start", "end", "size", "prot");
2415 walk_memory_regions(f, dump_region);
33417e70
FB
2416}
2417
53a5960a 2418int page_get_flags(target_ulong address)
33417e70 2419{
9fa3e853
FB
2420 PageDesc *p;
2421
2422 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2423 if (!p)
9fa3e853
FB
2424 return 0;
2425 return p->flags;
2426}
2427
376a7909
RH
2428/* Modify the flags of a page and invalidate the code if necessary.
2429 The flag PAGE_WRITE_ORG is positioned automatically depending
2430 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2431void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2432{
376a7909
RH
2433 target_ulong addr, len;
2434
2435 /* This function should never be called with addresses outside the
2436 guest address space. If this assert fires, it probably indicates
2437 a missing call to h2g_valid. */
b480d9b7
PB
2438#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2439 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2440#endif
2441 assert(start < end);
9fa3e853
FB
2442
2443 start = start & TARGET_PAGE_MASK;
2444 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2445
2446 if (flags & PAGE_WRITE) {
9fa3e853 2447 flags |= PAGE_WRITE_ORG;
376a7909
RH
2448 }
2449
2450 for (addr = start, len = end - start;
2451 len != 0;
2452 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2453 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2454
2455 /* If the write protection bit is set, then we invalidate
2456 the code inside. */
5fafdf24 2457 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2458 (flags & PAGE_WRITE) &&
2459 p->first_tb) {
d720b93d 2460 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2461 }
2462 p->flags = flags;
2463 }
33417e70
FB
2464}
2465
3d97b40b
TS
2466int page_check_range(target_ulong start, target_ulong len, int flags)
2467{
2468 PageDesc *p;
2469 target_ulong end;
2470 target_ulong addr;
2471
376a7909
RH
2472 /* This function should never be called with addresses outside the
2473 guest address space. If this assert fires, it probably indicates
2474 a missing call to h2g_valid. */
338e9e6c
BS
2475#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2476 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2477#endif
2478
3e0650a9
RH
2479 if (len == 0) {
2480 return 0;
2481 }
376a7909
RH
2482 if (start + len - 1 < start) {
2483 /* We've wrapped around. */
55f280c9 2484 return -1;
376a7909 2485 }
55f280c9 2486
3d97b40b
TS
2487 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2488 start = start & TARGET_PAGE_MASK;
2489
376a7909
RH
2490 for (addr = start, len = end - start;
2491 len != 0;
2492 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2493 p = page_find(addr >> TARGET_PAGE_BITS);
2494 if( !p )
2495 return -1;
2496 if( !(p->flags & PAGE_VALID) )
2497 return -1;
2498
dae3270c 2499 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2500 return -1;
dae3270c
FB
2501 if (flags & PAGE_WRITE) {
2502 if (!(p->flags & PAGE_WRITE_ORG))
2503 return -1;
2504 /* unprotect the page if it was put read-only because it
2505 contains translated code */
2506 if (!(p->flags & PAGE_WRITE)) {
2507 if (!page_unprotect(addr, 0, NULL))
2508 return -1;
2509 }
2510 return 0;
2511 }
3d97b40b
TS
2512 }
2513 return 0;
2514}
2515
9fa3e853 2516/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2517 page. Return TRUE if the fault was successfully handled. */
53a5960a 2518int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2519{
45d679d6
AJ
2520 unsigned int prot;
2521 PageDesc *p;
53a5960a 2522 target_ulong host_start, host_end, addr;
9fa3e853 2523
c8a706fe
PB
2524 /* Technically this isn't safe inside a signal handler. However we
2525 know this only ever happens in a synchronous SEGV handler, so in
2526 practice it seems to be ok. */
2527 mmap_lock();
2528
45d679d6
AJ
2529 p = page_find(address >> TARGET_PAGE_BITS);
2530 if (!p) {
c8a706fe 2531 mmap_unlock();
9fa3e853 2532 return 0;
c8a706fe 2533 }
45d679d6 2534
9fa3e853
FB
2535 /* if the page was really writable, then we change its
2536 protection back to writable */
45d679d6
AJ
2537 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2538 host_start = address & qemu_host_page_mask;
2539 host_end = host_start + qemu_host_page_size;
2540
2541 prot = 0;
2542 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2543 p = page_find(addr >> TARGET_PAGE_BITS);
2544 p->flags |= PAGE_WRITE;
2545 prot |= p->flags;
2546
9fa3e853
FB
2547 /* and since the content will be modified, we must invalidate
2548 the corresponding translated code. */
45d679d6 2549 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2550#ifdef DEBUG_TB_CHECK
45d679d6 2551 tb_invalidate_check(addr);
9fa3e853 2552#endif
9fa3e853 2553 }
45d679d6
AJ
2554 mprotect((void *)g2h(host_start), qemu_host_page_size,
2555 prot & PAGE_BITS);
2556
2557 mmap_unlock();
2558 return 1;
9fa3e853 2559 }
c8a706fe 2560 mmap_unlock();
9fa3e853
FB
2561 return 0;
2562}
2563
6a00d601
FB
2564static inline void tlb_set_dirty(CPUState *env,
2565 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2566{
2567}
9fa3e853
FB
2568#endif /* defined(CONFIG_USER_ONLY) */
2569
e2eef170 2570#if !defined(CONFIG_USER_ONLY)
8da3ff18 2571
c04b2b78
PB
2572#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2573typedef struct subpage_t {
2574 target_phys_addr_t base;
f6405247
RH
2575 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2576 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2577} subpage_t;
2578
c227f099
AL
2579static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2580 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2581static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2582 ram_addr_t orig_memory,
2583 ram_addr_t region_offset);
db7b5426
BS
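/* Compute the sub-page range [start_addr2, end_addr2] that the registration
   [start_addr, start_addr + orig_size) covers inside the target page at
   'addr', and set need_subpage when that page is only partially covered. */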
2584#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2585 need_subpage) \
2586 do { \
2587 if (addr > start_addr) \
2588 start_addr2 = 0; \
2589 else { \
2590 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2591 if (start_addr2 > 0) \
2592 need_subpage = 1; \
2593 } \
2594 \
49e9fba2 2595 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2596 end_addr2 = TARGET_PAGE_SIZE - 1; \
2597 else { \
2598 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2599 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2600 need_subpage = 1; \
2601 } \
2602 } while (0)
2603
8f2498f9
MT
2604/* register physical memory.
2605 For RAM, 'size' must be a multiple of the target page size.
2606 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2607 io memory page. The address used when calling the IO function is
2608 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2609 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2610 before calculating this offset. This should not be a problem unless
2611 the low bits of start_addr and region_offset differ. */
0fd542fb 2612void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2613 ram_addr_t size,
2614 ram_addr_t phys_offset,
0fd542fb
MT
2615 ram_addr_t region_offset,
2616 bool log_dirty)
33417e70 2617{
c227f099 2618 target_phys_addr_t addr, end_addr;
92e873b9 2619 PhysPageDesc *p;
9d42037b 2620 CPUState *env;
c227f099 2621 ram_addr_t orig_size = size;
f6405247 2622 subpage_t *subpage;
33417e70 2623
0fd542fb 2624 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2625
67c4d23c
PB
2626 if (phys_offset == IO_MEM_UNASSIGNED) {
2627 region_offset = start_addr;
2628 }
8da3ff18 2629 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2630 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2631 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2632 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2633 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2634 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2635 ram_addr_t orig_memory = p->phys_offset;
2636 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2637 int need_subpage = 0;
2638
2639 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2640 need_subpage);
f6405247 2641 if (need_subpage) {
db7b5426
BS
2642 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2643 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2644 &p->phys_offset, orig_memory,
2645 p->region_offset);
db7b5426
BS
2646 } else {
2647 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2648 >> IO_MEM_SHIFT];
2649 }
8da3ff18
PB
2650 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2651 region_offset);
2652 p->region_offset = 0;
db7b5426
BS
2653 } else {
2654 p->phys_offset = phys_offset;
2655 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2656 (phys_offset & IO_MEM_ROMD))
2657 phys_offset += TARGET_PAGE_SIZE;
2658 }
2659 } else {
2660 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2661 p->phys_offset = phys_offset;
8da3ff18 2662 p->region_offset = region_offset;
db7b5426 2663 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2664 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2665 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2666 } else {
c227f099 2667 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2668 int need_subpage = 0;
2669
2670 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2671 end_addr2, need_subpage);
2672
f6405247 2673 if (need_subpage) {
db7b5426 2674 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2675 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2676 addr & TARGET_PAGE_MASK);
db7b5426 2677 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2678 phys_offset, region_offset);
2679 p->region_offset = 0;
db7b5426
BS
2680 }
2681 }
2682 }
8da3ff18 2683 region_offset += TARGET_PAGE_SIZE;
33417e70 2684 }
3b46e624 2685
9d42037b
FB
2686 /* since each CPU stores ram addresses in its TLB cache, we must
2687 reset the modified entries */
2688 /* XXX: slow ! */
2689 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2690 tlb_flush(env, 1);
2691 }
33417e70
FB
2692}
2693
ba863458 2694/* XXX: temporary until new memory mapping API */
c227f099 2695ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2696{
2697 PhysPageDesc *p;
2698
2699 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2700 if (!p)
2701 return IO_MEM_UNASSIGNED;
2702 return p->phys_offset;
2703}
2704
c227f099 2705void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2706{
2707 if (kvm_enabled())
2708 kvm_coalesce_mmio_region(addr, size);
2709}
2710
c227f099 2711void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2712{
2713 if (kvm_enabled())
2714 kvm_uncoalesce_mmio_region(addr, size);
2715}
2716
62a2744c
SY
2717void qemu_flush_coalesced_mmio_buffer(void)
2718{
2719 if (kvm_enabled())
2720 kvm_flush_coalesced_mmio_buffer();
2721}
2722
c902760f
MT
2723#if defined(__linux__) && !defined(TARGET_S390X)
2724
2725#include <sys/vfs.h>
2726
2727#define HUGETLBFS_MAGIC 0x958458f6
2728
2729static long gethugepagesize(const char *path)
2730{
2731 struct statfs fs;
2732 int ret;
2733
2734 do {
9742bf26 2735 ret = statfs(path, &fs);
c902760f
MT
2736 } while (ret != 0 && errno == EINTR);
2737
2738 if (ret != 0) {
9742bf26
YT
2739 perror(path);
2740 return 0;
c902760f
MT
2741 }
2742
2743 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2744 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2745
2746 return fs.f_bsize;
2747}
2748
04b16653
AW
2749static void *file_ram_alloc(RAMBlock *block,
2750 ram_addr_t memory,
2751 const char *path)
c902760f
MT
2752{
2753 char *filename;
2754 void *area;
2755 int fd;
2756#ifdef MAP_POPULATE
2757 int flags;
2758#endif
2759 unsigned long hpagesize;
2760
2761 hpagesize = gethugepagesize(path);
2762 if (!hpagesize) {
9742bf26 2763 return NULL;
c902760f
MT
2764 }
2765
2766 if (memory < hpagesize) {
2767 return NULL;
2768 }
2769
2770 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2771 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2772 return NULL;
2773 }
2774
2775 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2776 return NULL;
c902760f
MT
2777 }
2778
2779 fd = mkstemp(filename);
2780 if (fd < 0) {
9742bf26
YT
2781 perror("unable to create backing store for hugepages");
2782 free(filename);
2783 return NULL;
c902760f
MT
2784 }
2785 unlink(filename);
2786 free(filename);
2787
2788 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2789
2790 /*
2791 * ftruncate is not supported by hugetlbfs in older
2792 * hosts, so don't bother bailing out on errors.
2793 * If anything goes wrong with it under other filesystems,
2794 * mmap will fail.
2795 */
2796 if (ftruncate(fd, memory))
9742bf26 2797 perror("ftruncate");
c902760f
MT
2798
2799#ifdef MAP_POPULATE
2800 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2801 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2802 * to sidestep this quirk.
2803 */
2804 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2805 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2806#else
2807 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2808#endif
2809 if (area == MAP_FAILED) {
9742bf26
YT
2810 perror("file_ram_alloc: can't mmap RAM pages");
2811 close(fd);
2812 return (NULL);
c902760f 2813 }
04b16653 2814 block->fd = fd;
c902760f
MT
2815 return area;
2816}
2817#endif
2818
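/* Find the smallest gap between the registered RAM blocks that can hold
   'size' bytes and return the offset at which that gap starts (0 when no
   blocks have been registered yet). */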
d17b5288 2819static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2820{
2821 RAMBlock *block, *next_block;
09d7ae90 2822 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2823
2824 if (QLIST_EMPTY(&ram_list.blocks))
2825 return 0;
2826
2827 QLIST_FOREACH(block, &ram_list.blocks, next) {
2828 ram_addr_t end, next = ULONG_MAX;
2829
2830 end = block->offset + block->length;
2831
2832 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2833 if (next_block->offset >= end) {
2834 next = MIN(next, next_block->offset);
2835 }
2836 }
2837 if (next - end >= size && next - end < mingap) {
2838 offset = end;
2839 mingap = next - end;
2840 }
2841 }
2842 return offset;
2843}
2844
2845static ram_addr_t last_ram_offset(void)
d17b5288
AW
2846{
2847 RAMBlock *block;
2848 ram_addr_t last = 0;
2849
2850 QLIST_FOREACH(block, &ram_list.blocks, next)
2851 last = MAX(last, block->offset + block->length);
2852
2853 return last;
2854}
2855
84b89d78 2856ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2857 ram_addr_t size, void *host)
84b89d78
CM
2858{
2859 RAMBlock *new_block, *block;
2860
2861 size = TARGET_PAGE_ALIGN(size);
2862 new_block = qemu_mallocz(sizeof(*new_block));
2863
2864 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2865 char *id = dev->parent_bus->info->get_dev_path(dev);
2866 if (id) {
2867 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2868 qemu_free(id);
2869 }
2870 }
2871 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2872
2873 QLIST_FOREACH(block, &ram_list.blocks, next) {
2874 if (!strcmp(block->idstr, new_block->idstr)) {
2875 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2876 new_block->idstr);
2877 abort();
2878 }
2879 }
2880
6977dfe6
YT
2881 if (host) {
2882 new_block->host = host;
cd19cfa2 2883 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2884 } else {
2885 if (mem_path) {
c902760f 2886#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2887 new_block->host = file_ram_alloc(new_block, size, mem_path);
2888 if (!new_block->host) {
2889 new_block->host = qemu_vmalloc(size);
e78815a5 2890 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2891 }
c902760f 2892#else
6977dfe6
YT
2893 fprintf(stderr, "-mem-path option unsupported\n");
2894 exit(1);
c902760f 2895#endif
6977dfe6 2896 } else {
6b02494d 2897#if defined(TARGET_S390X) && defined(CONFIG_KVM)
6977dfe6
YT
2898 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2899 new_block->host = mmap((void*)0x1000000, size,
2900 PROT_EXEC|PROT_READ|PROT_WRITE,
2901 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2902#else
6977dfe6 2903 new_block->host = qemu_vmalloc(size);
6b02494d 2904#endif
e78815a5 2905 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2906 }
c902760f 2907 }
6977dfe6 2908
d17b5288 2909 new_block->offset = find_ram_offset(size);
94a6b54f
PB
2910 new_block->length = size;
2911
f471a17e 2912 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2913
f471a17e 2914 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2915 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2916 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2917 0xff, size >> TARGET_PAGE_BITS);
2918
6f0437e8
JK
2919 if (kvm_enabled())
2920 kvm_setup_guest_memory(new_block->host, size);
2921
94a6b54f
PB
2922 return new_block->offset;
2923}
e9a1ab19 2924
6977dfe6
YT
2925ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2926{
2927 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2928}
2929
c227f099 2930void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2931{
04b16653
AW
2932 RAMBlock *block;
2933
2934 QLIST_FOREACH(block, &ram_list.blocks, next) {
2935 if (addr == block->offset) {
2936 QLIST_REMOVE(block, next);
cd19cfa2
HY
2937 if (block->flags & RAM_PREALLOC_MASK) {
2938 ;
2939 } else if (mem_path) {
04b16653
AW
2940#if defined (__linux__) && !defined(TARGET_S390X)
2941 if (block->fd) {
2942 munmap(block->host, block->length);
2943 close(block->fd);
2944 } else {
2945 qemu_vfree(block->host);
2946 }
fd28aa13
JK
2947#else
2948 abort();
04b16653
AW
2949#endif
2950 } else {
2951#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2952 munmap(block->host, block->length);
2953#else
2954 qemu_vfree(block->host);
2955#endif
2956 }
2957 qemu_free(block);
2958 return;
2959 }
2960 }
2961
e9a1ab19
FB
2962}
2963
cd19cfa2
HY
2964#ifndef _WIN32
2965void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2966{
2967 RAMBlock *block;
2968 ram_addr_t offset;
2969 int flags;
2970 void *area, *vaddr;
2971
2972 QLIST_FOREACH(block, &ram_list.blocks, next) {
2973 offset = addr - block->offset;
2974 if (offset < block->length) {
2975 vaddr = block->host + offset;
2976 if (block->flags & RAM_PREALLOC_MASK) {
2977 ;
2978 } else {
2979 flags = MAP_FIXED;
2980 munmap(vaddr, length);
2981 if (mem_path) {
2982#if defined(__linux__) && !defined(TARGET_S390X)
2983 if (block->fd) {
2984#ifdef MAP_POPULATE
2985 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2986 MAP_PRIVATE;
2987#else
2988 flags |= MAP_PRIVATE;
2989#endif
2990 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2991 flags, block->fd, offset);
2992 } else {
2993 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2994 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2995 flags, -1, 0);
2996 }
fd28aa13
JK
2997#else
2998 abort();
cd19cfa2
HY
2999#endif
3000 } else {
3001#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3002 flags |= MAP_SHARED | MAP_ANONYMOUS;
3003 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3004 flags, -1, 0);
3005#else
3006 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3007 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3008 flags, -1, 0);
3009#endif
3010 }
3011 if (area != vaddr) {
3012 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3013 length, addr);
3014 exit(1);
3015 }
3016 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3017 }
3018 return;
3019 }
3020 }
3021}
3022#endif /* !_WIN32 */
3023
dc828ca1 3024/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3025 With the exception of the softmmu code in this file, this should
3026 only be used for local memory (e.g. video ram) that the device owns,
3027 and knows it isn't going to access beyond the end of the block.
3028
3029 It should not be used for general purpose DMA.
3030 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3031 */
c227f099 3032void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3033{
94a6b54f
PB
3034 RAMBlock *block;
3035
f471a17e
AW
3036 QLIST_FOREACH(block, &ram_list.blocks, next) {
3037 if (addr - block->offset < block->length) {
7d82af38
VP
3038 /* Move this entry to the start of the list. */
3039 if (block != QLIST_FIRST(&ram_list.blocks)) {
3040 QLIST_REMOVE(block, next);
3041 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3042 }
f471a17e
AW
3043 return block->host + (addr - block->offset);
3044 }
94a6b54f 3045 }
f471a17e
AW
3046
3047 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3048 abort();
3049
3050 return NULL;
dc828ca1
PB
3051}
3052
b2e0a138
MT
3053/* Return a host pointer to ram allocated with qemu_ram_alloc.
3054 * Same as qemu_get_ram_ptr, but avoids reordering the RAM blocks.
3055 */
3056void *qemu_safe_ram_ptr(ram_addr_t addr)
3057{
3058 RAMBlock *block;
3059
3060 QLIST_FOREACH(block, &ram_list.blocks, next) {
3061 if (addr - block->offset < block->length) {
3062 return block->host + (addr - block->offset);
3063 }
3064 }
3065
3066 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3067 abort();
3068
3069 return NULL;
3070}
3071
e890261f 3072int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3073{
94a6b54f
PB
3074 RAMBlock *block;
3075 uint8_t *host = ptr;
3076
f471a17e
AW
3077 QLIST_FOREACH(block, &ram_list.blocks, next) {
3078 if (host - block->host < block->length) {
e890261f
MT
3079 *ram_addr = block->offset + (host - block->host);
3080 return 0;
f471a17e 3081 }
94a6b54f 3082 }
e890261f
MT
3083 return -1;
3084}
f471a17e 3085
e890261f
MT
3086/* Some of the softmmu routines need to translate from a host pointer
3087 (typically a TLB entry) back to a ram offset. */
3088ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3089{
3090 ram_addr_t ram_addr;
f471a17e 3091
e890261f
MT
3092 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3093 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3094 abort();
3095 }
3096 return ram_addr;
5579c7f3
PB
3097}
3098
c227f099 3099static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3100{
67d3b957 3101#ifdef DEBUG_UNASSIGNED
ab3d1727 3102 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3103#endif
faed1c2a 3104#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3105 do_unassigned_access(addr, 0, 0, 0, 1);
3106#endif
3107 return 0;
3108}
3109
c227f099 3110static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3111{
3112#ifdef DEBUG_UNASSIGNED
3113 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3114#endif
faed1c2a 3115#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3116 do_unassigned_access(addr, 0, 0, 0, 2);
3117#endif
3118 return 0;
3119}
3120
c227f099 3121static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3122{
3123#ifdef DEBUG_UNASSIGNED
3124 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3125#endif
faed1c2a 3126#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3127 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 3128#endif
33417e70
FB
3129 return 0;
3130}
3131
c227f099 3132static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3133{
67d3b957 3134#ifdef DEBUG_UNASSIGNED
ab3d1727 3135 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3136#endif
faed1c2a 3137#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3138 do_unassigned_access(addr, 1, 0, 0, 1);
3139#endif
3140}
3141
c227f099 3142static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3143{
3144#ifdef DEBUG_UNASSIGNED
3145 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3146#endif
faed1c2a 3147#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3148 do_unassigned_access(addr, 1, 0, 0, 2);
3149#endif
3150}
3151
c227f099 3152static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3153{
3154#ifdef DEBUG_UNASSIGNED
3155 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3156#endif
faed1c2a 3157#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3158 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 3159#endif
33417e70
FB
3160}
3161
d60efc6b 3162static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3163 unassigned_mem_readb,
e18231a3
BS
3164 unassigned_mem_readw,
3165 unassigned_mem_readl,
33417e70
FB
3166};
3167
d60efc6b 3168static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3169 unassigned_mem_writeb,
e18231a3
BS
3170 unassigned_mem_writew,
3171 unassigned_mem_writel,
33417e70
FB
3172};
3173
c227f099 3174static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3175 uint32_t val)
9fa3e853 3176{
3a7d929e 3177 int dirty_flags;
f7c11b53 3178 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3179 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3180#if !defined(CONFIG_USER_ONLY)
3a7d929e 3181 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3182 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3183#endif
3a7d929e 3184 }
5579c7f3 3185 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3186 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3187 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3188 /* we remove the notdirty callback only if the code has been
3189 flushed */
3190 if (dirty_flags == 0xff)
2e70f6ef 3191 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3192}
3193
c227f099 3194static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3195 uint32_t val)
9fa3e853 3196{
3a7d929e 3197 int dirty_flags;
f7c11b53 3198 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3199 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3200#if !defined(CONFIG_USER_ONLY)
3a7d929e 3201 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3202 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3203#endif
3a7d929e 3204 }
5579c7f3 3205 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3206 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3207 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3208 /* we remove the notdirty callback only if the code has been
3209 flushed */
3210 if (dirty_flags == 0xff)
2e70f6ef 3211 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3212}
3213
c227f099 3214static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3215 uint32_t val)
9fa3e853 3216{
3a7d929e 3217 int dirty_flags;
f7c11b53 3218 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3219 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3220#if !defined(CONFIG_USER_ONLY)
3a7d929e 3221 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3222 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3223#endif
3a7d929e 3224 }
5579c7f3 3225 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3226 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3227 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3228 /* we remove the notdirty callback only if the code has been
3229 flushed */
3230 if (dirty_flags == 0xff)
2e70f6ef 3231 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3232}
3233
d60efc6b 3234static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3235 NULL, /* never used */
3236 NULL, /* never used */
3237 NULL, /* never used */
3238};
3239
d60efc6b 3240static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3241 notdirty_mem_writeb,
3242 notdirty_mem_writew,
3243 notdirty_mem_writel,
3244};
3245
0f459d16 3246/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3247static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3248{
3249 CPUState *env = cpu_single_env;
06d55cc1
AL
3250 target_ulong pc, cs_base;
3251 TranslationBlock *tb;
0f459d16 3252 target_ulong vaddr;
a1d1bb31 3253 CPUWatchpoint *wp;
06d55cc1 3254 int cpu_flags;
0f459d16 3255
06d55cc1
AL
3256 if (env->watchpoint_hit) {
3257 /* We re-entered the check after replacing the TB. Now raise
3258 * the debug interrupt so that it will trigger after the
3259 * current instruction. */
3260 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3261 return;
3262 }
2e70f6ef 3263 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3264 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
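        /* A watchpoint hits when either the watched address falls inside
           the bytes being accessed, or the access falls inside the watched
           region. */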
b4051334
AL
3265 if ((vaddr == (wp->vaddr & len_mask) ||
3266 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3267 wp->flags |= BP_WATCHPOINT_HIT;
3268 if (!env->watchpoint_hit) {
3269 env->watchpoint_hit = wp;
3270 tb = tb_find_pc(env->mem_io_pc);
3271 if (!tb) {
3272 cpu_abort(env, "check_watchpoint: could not find TB for "
3273 "pc=%p", (void *)env->mem_io_pc);
3274 }
3275 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3276 tb_phys_invalidate(tb, -1);
3277 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3278 env->exception_index = EXCP_DEBUG;
3279 } else {
3280 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3281 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3282 }
3283 cpu_resume_from_signal(env, NULL);
06d55cc1 3284 }
6e140f28
AL
3285 } else {
3286 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3287 }
3288 }
3289}
3290
6658ffb8
PB
3291/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3292 so these check for a hit then pass through to the normal out-of-line
3293 phys routines. */
c227f099 3294static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3295{
b4051334 3296 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3297 return ldub_phys(addr);
3298}
3299
c227f099 3300static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3301{
b4051334 3302 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3303 return lduw_phys(addr);
3304}
3305
c227f099 3306static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3307{
b4051334 3308 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3309 return ldl_phys(addr);
3310}
3311
c227f099 3312static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3313 uint32_t val)
3314{
b4051334 3315 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3316 stb_phys(addr, val);
3317}
3318
c227f099 3319static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3320 uint32_t val)
3321{
b4051334 3322 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3323 stw_phys(addr, val);
3324}
3325
c227f099 3326static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3327 uint32_t val)
3328{
b4051334 3329 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3330 stl_phys(addr, val);
3331}
3332
d60efc6b 3333static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3334 watch_mem_readb,
3335 watch_mem_readw,
3336 watch_mem_readl,
3337};
3338
d60efc6b 3339static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3340 watch_mem_writeb,
3341 watch_mem_writew,
3342 watch_mem_writel,
3343};
6658ffb8 3344
f6405247
RH
3345static inline uint32_t subpage_readlen (subpage_t *mmio,
3346 target_phys_addr_t addr,
3347 unsigned int len)
db7b5426 3348{
f6405247 3349 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3350#if defined(DEBUG_SUBPAGE)
3351 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3352 mmio, len, addr, idx);
3353#endif
db7b5426 3354
f6405247
RH
3355 addr += mmio->region_offset[idx];
3356 idx = mmio->sub_io_index[idx];
3357 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3358}
3359
c227f099 3360static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3361 uint32_t value, unsigned int len)
db7b5426 3362{
f6405247 3363 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3364#if defined(DEBUG_SUBPAGE)
f6405247
RH
3365 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3366 __func__, mmio, len, addr, idx, value);
db7b5426 3367#endif
f6405247
RH
3368
3369 addr += mmio->region_offset[idx];
3370 idx = mmio->sub_io_index[idx];
3371 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3372}
3373
c227f099 3374static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3375{
db7b5426
BS
3376 return subpage_readlen(opaque, addr, 0);
3377}
3378
c227f099 3379static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3380 uint32_t value)
3381{
db7b5426
BS
3382 subpage_writelen(opaque, addr, value, 0);
3383}
3384
c227f099 3385static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3386{
db7b5426
BS
3387 return subpage_readlen(opaque, addr, 1);
3388}
3389
c227f099 3390static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3391 uint32_t value)
3392{
db7b5426
BS
3393 subpage_writelen(opaque, addr, value, 1);
3394}
3395
c227f099 3396static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3397{
db7b5426
BS
3398 return subpage_readlen(opaque, addr, 2);
3399}
3400
f6405247
RH
3401static void subpage_writel (void *opaque, target_phys_addr_t addr,
3402 uint32_t value)
db7b5426 3403{
db7b5426
BS
3404 subpage_writelen(opaque, addr, value, 2);
3405}
3406
d60efc6b 3407static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3408 &subpage_readb,
3409 &subpage_readw,
3410 &subpage_readl,
3411};
3412
d60efc6b 3413static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3414 &subpage_writeb,
3415 &subpage_writew,
3416 &subpage_writel,
3417};
3418
c227f099
AL
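/* Point every sub-page slot in [start, end] of 'mmio' at the I/O handler
   index carried in 'memory' (RAM is downgraded to IO_MEM_UNASSIGNED),
   recording 'region_offset' so the access address can be relocated before
   dispatch. */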
3419static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3420 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3421{
3422 int idx, eidx;
3423
3424 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3425 return -1;
3426 idx = SUBPAGE_IDX(start);
3427 eidx = SUBPAGE_IDX(end);
3428#if defined(DEBUG_SUBPAGE)
0bf9e31a 3429 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3430 mmio, start, end, idx, eidx, memory);
3431#endif
95c318f5
GN
3432 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3433 memory = IO_MEM_UNASSIGNED;
f6405247 3434 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3435 for (; idx <= eidx; idx++) {
f6405247
RH
3436 mmio->sub_io_index[idx] = memory;
3437 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3438 }
3439
3440 return 0;
3441}
3442
f6405247
RH
3443static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3444 ram_addr_t orig_memory,
3445 ram_addr_t region_offset)
db7b5426 3446{
c227f099 3447 subpage_t *mmio;
db7b5426
BS
3448 int subpage_memory;
3449
c227f099 3450 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3451
3452 mmio->base = base;
2507c12a
AG
3453 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3454 DEVICE_NATIVE_ENDIAN);
db7b5426 3455#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3456 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3457 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3458#endif
1eec614b 3459 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3460 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3461
3462 return mmio;
3463}
3464
88715657
AL
3465static int get_free_io_mem_idx(void)
3466{
3467 int i;
3468
3469 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3470 if (!io_mem_used[i]) {
3471 io_mem_used[i] = 1;
3472 return i;
3473 }
c6703b47 3474 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3475 return -1;
3476}
3477
dd310534
AG
3478/*
3479 * Usually, devices operate in little endian mode. There are devices out
 3480 * there that operate in big endian too. Each device gets byte-swapped
 3481 * mmio if plugged onto a CPU that uses the other endianness.
3482 *
3483 * CPU Device swap?
3484 *
3485 * little little no
3486 * little big yes
3487 * big little yes
3488 * big big no
3489 */
3490
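/* Editor's illustrative sketch, not part of the original exec.c: the table
 * above boils down to "swap exactly when the device's declared endianness
 * differs from the target's", which is what the endian switch in
 * cpu_register_io_memory_fixed() below implements. The helper name
 * mmio_needs_bswap is hypothetical.
 */
static inline int mmio_needs_bswap(enum device_endian endian)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return endian == DEVICE_LITTLE_ENDIAN; /* big-endian target, LE device */
#else
    return endian == DEVICE_BIG_ENDIAN;    /* little-endian target, BE device */
#endif
}
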
3491typedef struct SwapEndianContainer {
3492 CPUReadMemoryFunc *read[3];
3493 CPUWriteMemoryFunc *write[3];
3494 void *opaque;
3495} SwapEndianContainer;
3496
3497static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3498{
3499 uint32_t val;
3500 SwapEndianContainer *c = opaque;
3501 val = c->read[0](c->opaque, addr);
3502 return val;
3503}
3504
3505static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3506{
3507 uint32_t val;
3508 SwapEndianContainer *c = opaque;
3509 val = bswap16(c->read[1](c->opaque, addr));
3510 return val;
3511}
3512
3513static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3514{
3515 uint32_t val;
3516 SwapEndianContainer *c = opaque;
3517 val = bswap32(c->read[2](c->opaque, addr));
3518 return val;
3519}
3520
3521static CPUReadMemoryFunc * const swapendian_readfn[3]={
3522 swapendian_mem_readb,
3523 swapendian_mem_readw,
3524 swapendian_mem_readl
3525};
3526
3527static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3528 uint32_t val)
3529{
3530 SwapEndianContainer *c = opaque;
3531 c->write[0](c->opaque, addr, val);
3532}
3533
3534static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3535 uint32_t val)
3536{
3537 SwapEndianContainer *c = opaque;
3538 c->write[1](c->opaque, addr, bswap16(val));
3539}
3540
3541static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3542 uint32_t val)
3543{
3544 SwapEndianContainer *c = opaque;
3545 c->write[2](c->opaque, addr, bswap32(val));
3546}
3547
3548static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3549 swapendian_mem_writeb,
3550 swapendian_mem_writew,
3551 swapendian_mem_writel
3552};
3553
3554static void swapendian_init(int io_index)
3555{
3556 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3557 int i;
3558
3559 /* Swap mmio for big endian targets */
3560 c->opaque = io_mem_opaque[io_index];
3561 for (i = 0; i < 3; i++) {
3562 c->read[i] = io_mem_read[io_index][i];
3563 c->write[i] = io_mem_write[io_index][i];
3564
3565 io_mem_read[io_index][i] = swapendian_readfn[i];
3566 io_mem_write[io_index][i] = swapendian_writefn[i];
3567 }
3568 io_mem_opaque[io_index] = c;
3569}
3570
3571static void swapendian_del(int io_index)
3572{
3573 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3574 qemu_free(io_mem_opaque[io_index]);
3575 }
3576}
3577
33417e70
FB
3578/* mem_read and mem_write are arrays of functions containing the
3579 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3580 2). Functions can be omitted with a NULL function pointer.
3ee89922 3581 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3582 modified. If it is zero, a new io zone is allocated. The return
3583 value can be used with cpu_register_physical_memory(). (-1) is
 3584 returned on error. */
1eed09cb 3585static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3586 CPUReadMemoryFunc * const *mem_read,
3587 CPUWriteMemoryFunc * const *mem_write,
dd310534 3588 void *opaque, enum device_endian endian)
33417e70 3589{
3cab721d
RH
3590 int i;
3591
33417e70 3592 if (io_index <= 0) {
88715657
AL
3593 io_index = get_free_io_mem_idx();
3594 if (io_index == -1)
3595 return io_index;
33417e70 3596 } else {
1eed09cb 3597 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3598 if (io_index >= IO_MEM_NB_ENTRIES)
3599 return -1;
3600 }
b5ff1b31 3601
3cab721d
RH
3602 for (i = 0; i < 3; ++i) {
3603 io_mem_read[io_index][i]
3604 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3605 }
3606 for (i = 0; i < 3; ++i) {
3607 io_mem_write[io_index][i]
3608 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3609 }
a4193c8a 3610 io_mem_opaque[io_index] = opaque;
f6405247 3611
dd310534
AG
3612 switch (endian) {
3613 case DEVICE_BIG_ENDIAN:
3614#ifndef TARGET_WORDS_BIGENDIAN
3615 swapendian_init(io_index);
3616#endif
3617 break;
3618 case DEVICE_LITTLE_ENDIAN:
3619#ifdef TARGET_WORDS_BIGENDIAN
3620 swapendian_init(io_index);
3621#endif
3622 break;
3623 case DEVICE_NATIVE_ENDIAN:
3624 default:
3625 break;
3626 }
3627
f6405247 3628 return (io_index << IO_MEM_SHIFT);
33417e70 3629}
61382a50 3630
d60efc6b
BS
3631int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3632 CPUWriteMemoryFunc * const *mem_write,
dd310534 3633 void *opaque, enum device_endian endian)
1eed09cb 3634{
2507c12a 3635 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3636}
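
/* Editor's illustrative sketch, not part of the original exec.c: how a
 * (hypothetical) device could use cpu_register_io_memory(). The my_dev_*
 * names and the 0x10000000 base address are invented; the call to
 * cpu_register_physical_memory() is the mapping step mentioned in the
 * comment above cpu_register_io_memory_fixed(), with its usual
 * (start address, size, phys offset) arguments assumed.
 */
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* hypothetical register read */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* hypothetical register write */
}

/* Byte and word handlers are left NULL: they fall back to the unassigned
   handlers, as the comment above documents. */
static CPUReadMemoryFunc * const my_dev_read[3] = { NULL, NULL, my_dev_readl };
static CPUWriteMemoryFunc * const my_dev_write[3] = { NULL, NULL, my_dev_writel };

static void my_dev_map(void *opaque)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, opaque,
                                    DEVICE_LITTLE_ENDIAN);
    if (io != -1) {
        cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
    }
}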
3637
88715657
AL
3638void cpu_unregister_io_memory(int io_table_address)
3639{
3640 int i;
3641 int io_index = io_table_address >> IO_MEM_SHIFT;
3642
dd310534
AG
3643 swapendian_del(io_index);
3644
88715657
AL
3645 for (i=0;i < 3; i++) {
3646 io_mem_read[io_index][i] = unassigned_mem_read[i];
3647 io_mem_write[io_index][i] = unassigned_mem_write[i];
3648 }
3649 io_mem_opaque[io_index] = NULL;
3650 io_mem_used[io_index] = 0;
3651}
3652
e9179ce1
AK
3653static void io_mem_init(void)
3654{
3655 int i;
3656
2507c12a
AG
3657 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3658 unassigned_mem_write, NULL,
3659 DEVICE_NATIVE_ENDIAN);
3660 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3661 unassigned_mem_write, NULL,
3662 DEVICE_NATIVE_ENDIAN);
3663 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3664 notdirty_mem_write, NULL,
3665 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3666 for (i=0; i<5; i++)
3667 io_mem_used[i] = 1;
3668
3669 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3670 watch_mem_write, NULL,
3671 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3672}
3673
e2eef170
PB
3674#endif /* !defined(CONFIG_USER_ONLY) */
3675
13eb76e0
FB
3676/* physical memory access (slow version, mainly for debug) */
3677#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3678int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3679 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3680{
3681 int l, flags;
3682 target_ulong page;
53a5960a 3683 void * p;
13eb76e0
FB
3684
3685 while (len > 0) {
3686 page = addr & TARGET_PAGE_MASK;
3687 l = (page + TARGET_PAGE_SIZE) - addr;
3688 if (l > len)
3689 l = len;
3690 flags = page_get_flags(page);
3691 if (!(flags & PAGE_VALID))
a68fe89c 3692 return -1;
13eb76e0
FB
3693 if (is_write) {
3694 if (!(flags & PAGE_WRITE))
a68fe89c 3695 return -1;
579a97f7 3696 /* XXX: this code should not depend on lock_user */
72fb7daa 3697 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3698 return -1;
72fb7daa
AJ
3699 memcpy(p, buf, l);
3700 unlock_user(p, addr, l);
13eb76e0
FB
3701 } else {
3702 if (!(flags & PAGE_READ))
a68fe89c 3703 return -1;
579a97f7 3704 /* XXX: this code should not depend on lock_user */
72fb7daa 3705 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3706 return -1;
72fb7daa 3707 memcpy(buf, p, l);
5b257578 3708 unlock_user(p, addr, 0);
13eb76e0
FB
3709 }
3710 len -= l;
3711 buf += l;
3712 addr += l;
3713 }
a68fe89c 3714 return 0;
13eb76e0 3715}
8df1cd07 3716
13eb76e0 3717#else
c227f099 3718void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3719 int len, int is_write)
3720{
3721 int l, io_index;
3722 uint8_t *ptr;
3723 uint32_t val;
c227f099 3724 target_phys_addr_t page;
2e12669a 3725 unsigned long pd;
92e873b9 3726 PhysPageDesc *p;
3b46e624 3727
13eb76e0
FB
3728 while (len > 0) {
3729 page = addr & TARGET_PAGE_MASK;
3730 l = (page + TARGET_PAGE_SIZE) - addr;
3731 if (l > len)
3732 l = len;
92e873b9 3733 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3734 if (!p) {
3735 pd = IO_MEM_UNASSIGNED;
3736 } else {
3737 pd = p->phys_offset;
3738 }
3b46e624 3739
13eb76e0 3740 if (is_write) {
3a7d929e 3741 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3742 target_phys_addr_t addr1 = addr;
13eb76e0 3743 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3744 if (p)
6c2934db 3745 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3746 /* XXX: could force cpu_single_env to NULL to avoid
3747 potential bugs */
6c2934db 3748 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3749 /* 32 bit write access */
c27004ec 3750 val = ldl_p(buf);
6c2934db 3751 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3752 l = 4;
6c2934db 3753 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3754 /* 16 bit write access */
c27004ec 3755 val = lduw_p(buf);
6c2934db 3756 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3757 l = 2;
3758 } else {
1c213d19 3759 /* 8 bit write access */
c27004ec 3760 val = ldub_p(buf);
6c2934db 3761 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3762 l = 1;
3763 }
3764 } else {
b448f2f3
FB
3765 unsigned long addr1;
3766 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3767 /* RAM case */
5579c7f3 3768 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3769 memcpy(ptr, buf, l);
3a7d929e
FB
3770 if (!cpu_physical_memory_is_dirty(addr1)) {
3771 /* invalidate code */
3772 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3773 /* set dirty bit */
f7c11b53
YT
3774 cpu_physical_memory_set_dirty_flags(
3775 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3776 }
13eb76e0
FB
3777 }
3778 } else {
5fafdf24 3779 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3780 !(pd & IO_MEM_ROMD)) {
c227f099 3781 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3782 /* I/O case */
3783 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3784 if (p)
6c2934db
AJ
3785 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3786 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3787 /* 32 bit read access */
6c2934db 3788 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3789 stl_p(buf, val);
13eb76e0 3790 l = 4;
6c2934db 3791 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3792 /* 16 bit read access */
6c2934db 3793 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3794 stw_p(buf, val);
13eb76e0
FB
3795 l = 2;
3796 } else {
1c213d19 3797 /* 8 bit read access */
6c2934db 3798 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3799 stb_p(buf, val);
13eb76e0
FB
3800 l = 1;
3801 }
3802 } else {
3803 /* RAM case */
5579c7f3 3804 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3805 (addr & ~TARGET_PAGE_MASK);
3806 memcpy(buf, ptr, l);
3807 }
3808 }
3809 len -= l;
3810 buf += l;
3811 addr += l;
3812 }
3813}
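
/* Editor's illustrative sketch, not part of the original exec.c: a typical
 * caller of the slow path above, copying a small guest-physical buffer into
 * host memory. The 64-byte size and the function name are hypothetical.
 */
static void copy_guest_descriptor(target_phys_addr_t desc_pa, uint8_t *dst)
{
    cpu_physical_memory_rw(desc_pa, dst, 64, 0); /* is_write == 0: read */
}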
8df1cd07 3814
d0ecd2aa 3815/* used for ROM loading : can write in RAM and ROM */
c227f099 3816void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3817 const uint8_t *buf, int len)
3818{
3819 int l;
3820 uint8_t *ptr;
c227f099 3821 target_phys_addr_t page;
d0ecd2aa
FB
3822 unsigned long pd;
3823 PhysPageDesc *p;
3b46e624 3824
d0ecd2aa
FB
3825 while (len > 0) {
3826 page = addr & TARGET_PAGE_MASK;
3827 l = (page + TARGET_PAGE_SIZE) - addr;
3828 if (l > len)
3829 l = len;
3830 p = phys_page_find(page >> TARGET_PAGE_BITS);
3831 if (!p) {
3832 pd = IO_MEM_UNASSIGNED;
3833 } else {
3834 pd = p->phys_offset;
3835 }
3b46e624 3836
d0ecd2aa 3837 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3838 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3839 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3840 /* do nothing */
3841 } else {
3842 unsigned long addr1;
3843 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3844 /* ROM/RAM case */
5579c7f3 3845 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3846 memcpy(ptr, buf, l);
3847 }
3848 len -= l;
3849 buf += l;
3850 addr += l;
3851 }
3852}
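
/* Editor's illustrative sketch, not part of the original exec.c: a ROM
 * loader using the helper above, which stores into ROM-backed pages that
 * the ordinary cpu_physical_memory_rw() write path would discard. The blob
 * and its base address are hypothetical.
 */
static void load_firmware_blob(target_phys_addr_t rom_base,
                               const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}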
3853
6d16c2f8
AL
3854typedef struct {
3855 void *buffer;
c227f099
AL
3856 target_phys_addr_t addr;
3857 target_phys_addr_t len;
6d16c2f8
AL
3858} BounceBuffer;
3859
3860static BounceBuffer bounce;
3861
ba223c29
AL
3862typedef struct MapClient {
3863 void *opaque;
3864 void (*callback)(void *opaque);
72cf2d4f 3865 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3866} MapClient;
3867
72cf2d4f
BS
3868static QLIST_HEAD(map_client_list, MapClient) map_client_list
3869 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3870
3871void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3872{
3873 MapClient *client = qemu_malloc(sizeof(*client));
3874
3875 client->opaque = opaque;
3876 client->callback = callback;
72cf2d4f 3877 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3878 return client;
3879}
3880
3881void cpu_unregister_map_client(void *_client)
3882{
3883 MapClient *client = (MapClient *)_client;
3884
72cf2d4f 3885 QLIST_REMOVE(client, link);
34d5e948 3886 qemu_free(client);
ba223c29
AL
3887}
3888
3889static void cpu_notify_map_clients(void)
3890{
3891 MapClient *client;
3892
72cf2d4f
BS
3893 while (!QLIST_EMPTY(&map_client_list)) {
3894 client = QLIST_FIRST(&map_client_list);
ba223c29 3895 client->callback(client->opaque);
34d5e948 3896 cpu_unregister_map_client(client);
ba223c29
AL
3897 }
3898}
3899
6d16c2f8
AL
3900/* Map a physical memory region into a host virtual address.
3901 * May map a subset of the requested range, given by and returned in *plen.
3902 * May return NULL if resources needed to perform the mapping are exhausted.
3903 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3904 * Use cpu_register_map_client() to know when retrying the map operation is
3905 * likely to succeed.
6d16c2f8 3906 */
c227f099
AL
3907void *cpu_physical_memory_map(target_phys_addr_t addr,
3908 target_phys_addr_t *plen,
6d16c2f8
AL
3909 int is_write)
3910{
c227f099
AL
3911 target_phys_addr_t len = *plen;
3912 target_phys_addr_t done = 0;
6d16c2f8
AL
3913 int l;
3914 uint8_t *ret = NULL;
3915 uint8_t *ptr;
c227f099 3916 target_phys_addr_t page;
6d16c2f8
AL
3917 unsigned long pd;
3918 PhysPageDesc *p;
3919 unsigned long addr1;
3920
3921 while (len > 0) {
3922 page = addr & TARGET_PAGE_MASK;
3923 l = (page + TARGET_PAGE_SIZE) - addr;
3924 if (l > len)
3925 l = len;
3926 p = phys_page_find(page >> TARGET_PAGE_BITS);
3927 if (!p) {
3928 pd = IO_MEM_UNASSIGNED;
3929 } else {
3930 pd = p->phys_offset;
3931 }
3932
3933 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3934 if (done || bounce.buffer) {
3935 break;
3936 }
3937 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3938 bounce.addr = addr;
3939 bounce.len = l;
3940 if (!is_write) {
3941 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3942 }
3943 ptr = bounce.buffer;
3944 } else {
3945 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3946 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3947 }
3948 if (!done) {
3949 ret = ptr;
3950 } else if (ret + done != ptr) {
3951 break;
3952 }
3953
3954 len -= l;
3955 addr += l;
3956 done += l;
3957 }
3958 *plen = done;
3959 return ret;
3960}
3961
3962/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3963 * Will also mark the memory as dirty if is_write == 1. access_len gives
3964 * the amount of memory that was actually read or written by the caller.
3965 */
c227f099
AL
3966void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3967 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3968{
3969 if (buffer != bounce.buffer) {
3970 if (is_write) {
e890261f 3971 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3972 while (access_len) {
3973 unsigned l;
3974 l = TARGET_PAGE_SIZE;
3975 if (l > access_len)
3976 l = access_len;
3977 if (!cpu_physical_memory_is_dirty(addr1)) {
3978 /* invalidate code */
3979 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3980 /* set dirty bit */
f7c11b53
YT
3981 cpu_physical_memory_set_dirty_flags(
3982 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3983 }
3984 addr1 += l;
3985 access_len -= l;
3986 }
3987 }
3988 return;
3989 }
3990 if (is_write) {
3991 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3992 }
f8a83245 3993 qemu_vfree(bounce.buffer);
6d16c2f8 3994 bounce.buffer = NULL;
ba223c29 3995 cpu_notify_map_clients();
6d16c2f8 3996}
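
/* Editor's illustrative sketch, not part of the original exec.c: the
 * map/unmap protocol documented above, as a DMA-style caller might use it.
 * do_transfer(), retry_cb() and start_transfer() are hypothetical
 * stand-ins for device-specific code.
 */
static void do_transfer(void *host_ptr, target_phys_addr_t len)
{
    /* hypothetical: operate on the mapped guest memory here */
}

static void retry_cb(void *opaque)
{
    /* hypothetical: re-issue the transfer once the bounce buffer is free */
}

static void start_transfer(target_phys_addr_t addr, target_phys_addr_t len,
                           int is_write)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, is_write);

    if (!buf) {
        /* Mapping resources (the single bounce buffer) are exhausted:
           register a client and retry once it is notified. */
        cpu_register_map_client(NULL, retry_cb);
        return;
    }
    do_transfer(buf, plen); /* plen may be shorter than the requested len */
    cpu_physical_memory_unmap(buf, plen, is_write, plen);
}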
d0ecd2aa 3997
8df1cd07 3998/* warning: addr must be aligned */
c227f099 3999uint32_t ldl_phys(target_phys_addr_t addr)
8df1cd07
FB
4000{
4001 int io_index;
4002 uint8_t *ptr;
4003 uint32_t val;
4004 unsigned long pd;
4005 PhysPageDesc *p;
4006
4007 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4008 if (!p) {
4009 pd = IO_MEM_UNASSIGNED;
4010 } else {
4011 pd = p->phys_offset;
4012 }
3b46e624 4013
5fafdf24 4014 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4015 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4016 /* I/O case */
4017 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4018 if (p)
4019 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4020 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4021 } else {
4022 /* RAM case */
5579c7f3 4023 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
4024 (addr & ~TARGET_PAGE_MASK);
4025 val = ldl_p(ptr);
4026 }
4027 return val;
4028}
4029
84b7b8e7 4030/* warning: addr must be aligned */
c227f099 4031uint64_t ldq_phys(target_phys_addr_t addr)
84b7b8e7
FB
4032{
4033 int io_index;
4034 uint8_t *ptr;
4035 uint64_t val;
4036 unsigned long pd;
4037 PhysPageDesc *p;
4038
4039 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4040 if (!p) {
4041 pd = IO_MEM_UNASSIGNED;
4042 } else {
4043 pd = p->phys_offset;
4044 }
3b46e624 4045
2a4188a3
FB
4046 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4047 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4048 /* I/O case */
4049 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4050 if (p)
4051 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
4052#ifdef TARGET_WORDS_BIGENDIAN
4053 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4054 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4055#else
4056 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4057 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4058#endif
4059 } else {
4060 /* RAM case */
5579c7f3 4061 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
4062 (addr & ~TARGET_PAGE_MASK);
4063 val = ldq_p(ptr);
4064 }
4065 return val;
4066}
4067
aab33094 4068/* XXX: optimize */
c227f099 4069uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4070{
4071 uint8_t val;
4072 cpu_physical_memory_read(addr, &val, 1);
4073 return val;
4074}
4075
733f0b02 4076/* warning: addr must be aligned */
c227f099 4077uint32_t lduw_phys(target_phys_addr_t addr)
aab33094 4078{
733f0b02
MT
4079 int io_index;
4080 uint8_t *ptr;
4081 uint64_t val;
4082 unsigned long pd;
4083 PhysPageDesc *p;
4084
4085 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4086 if (!p) {
4087 pd = IO_MEM_UNASSIGNED;
4088 } else {
4089 pd = p->phys_offset;
4090 }
4091
4092 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4093 !(pd & IO_MEM_ROMD)) {
4094 /* I/O case */
4095 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4096 if (p)
4097 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4098 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4099 } else {
4100 /* RAM case */
4101 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4102 (addr & ~TARGET_PAGE_MASK);
4103 val = lduw_p(ptr);
4104 }
4105 return val;
aab33094
FB
4106}
4107
8df1cd07
FB
 4108/* warning: addr must be aligned. The ram page is not marked as dirty
4109 and the code inside is not invalidated. It is useful if the dirty
4110 bits are used to track modified PTEs */
c227f099 4111void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4112{
4113 int io_index;
4114 uint8_t *ptr;
4115 unsigned long pd;
4116 PhysPageDesc *p;
4117
4118 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4119 if (!p) {
4120 pd = IO_MEM_UNASSIGNED;
4121 } else {
4122 pd = p->phys_offset;
4123 }
3b46e624 4124
3a7d929e 4125 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4126 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4127 if (p)
4128 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4129 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4130 } else {
74576198 4131 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4132 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4133 stl_p(ptr, val);
74576198
AL
4134
4135 if (unlikely(in_migration)) {
4136 if (!cpu_physical_memory_is_dirty(addr1)) {
4137 /* invalidate code */
4138 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4139 /* set dirty bit */
f7c11b53
YT
4140 cpu_physical_memory_set_dirty_flags(
4141 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4142 }
4143 }
8df1cd07
FB
4144 }
4145}
4146
c227f099 4147void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4148{
4149 int io_index;
4150 uint8_t *ptr;
4151 unsigned long pd;
4152 PhysPageDesc *p;
4153
4154 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4155 if (!p) {
4156 pd = IO_MEM_UNASSIGNED;
4157 } else {
4158 pd = p->phys_offset;
4159 }
3b46e624 4160
bc98a7ef
JM
4161 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4162 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4163 if (p)
4164 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4165#ifdef TARGET_WORDS_BIGENDIAN
4166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4167 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4168#else
4169 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4170 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4171#endif
4172 } else {
5579c7f3 4173 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4174 (addr & ~TARGET_PAGE_MASK);
4175 stq_p(ptr, val);
4176 }
4177}
4178
8df1cd07 4179/* warning: addr must be aligned */
c227f099 4180void stl_phys(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4181{
4182 int io_index;
4183 uint8_t *ptr;
4184 unsigned long pd;
4185 PhysPageDesc *p;
4186
4187 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4188 if (!p) {
4189 pd = IO_MEM_UNASSIGNED;
4190 } else {
4191 pd = p->phys_offset;
4192 }
3b46e624 4193
3a7d929e 4194 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4195 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4196 if (p)
4197 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4198 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4199 } else {
4200 unsigned long addr1;
4201 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4202 /* RAM case */
5579c7f3 4203 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4204 stl_p(ptr, val);
3a7d929e
FB
4205 if (!cpu_physical_memory_is_dirty(addr1)) {
4206 /* invalidate code */
4207 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4208 /* set dirty bit */
f7c11b53
YT
4209 cpu_physical_memory_set_dirty_flags(addr1,
4210 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4211 }
8df1cd07
FB
4212 }
4213}
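
/* Editor's illustrative sketch, not part of the original exec.c: the aligned
 * word helpers in use, setting bit 0 of a (hypothetical) guest-physical
 * status word. Both calls require the 4-byte alignment the warnings demand.
 */
static void set_status_bit(target_phys_addr_t status_pa)
{
    uint32_t status = ldl_phys(status_pa);
    stl_phys(status_pa, status | 1u);
}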
4214
aab33094 4215/* XXX: optimize */
c227f099 4216void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4217{
4218 uint8_t v = val;
4219 cpu_physical_memory_write(addr, &v, 1);
4220}
4221
733f0b02 4222/* warning: addr must be aligned */
c227f099 4223void stw_phys(target_phys_addr_t addr, uint32_t val)
aab33094 4224{
733f0b02
MT
4225 int io_index;
4226 uint8_t *ptr;
4227 unsigned long pd;
4228 PhysPageDesc *p;
4229
4230 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4231 if (!p) {
4232 pd = IO_MEM_UNASSIGNED;
4233 } else {
4234 pd = p->phys_offset;
4235 }
4236
4237 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4238 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4239 if (p)
4240 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4241 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4242 } else {
4243 unsigned long addr1;
4244 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4245 /* RAM case */
4246 ptr = qemu_get_ram_ptr(addr1);
4247 stw_p(ptr, val);
4248 if (!cpu_physical_memory_is_dirty(addr1)) {
4249 /* invalidate code */
4250 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4251 /* set dirty bit */
4252 cpu_physical_memory_set_dirty_flags(addr1,
4253 (0xff & ~CODE_DIRTY_FLAG));
4254 }
4255 }
aab33094
FB
4256}
4257
4258/* XXX: optimize */
c227f099 4259void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4260{
4261 val = tswap64(val);
4262 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4263}
4264
5e2972fd 4265/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4266int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4267 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4268{
4269 int l;
c227f099 4270 target_phys_addr_t phys_addr;
9b3c35e0 4271 target_ulong page;
13eb76e0
FB
4272
4273 while (len > 0) {
4274 page = addr & TARGET_PAGE_MASK;
4275 phys_addr = cpu_get_phys_page_debug(env, page);
4276 /* if no physical page mapped, return an error */
4277 if (phys_addr == -1)
4278 return -1;
4279 l = (page + TARGET_PAGE_SIZE) - addr;
4280 if (l > len)
4281 l = len;
5e2972fd 4282 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4283 if (is_write)
4284 cpu_physical_memory_write_rom(phys_addr, buf, l);
4285 else
5e2972fd 4286 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4287 len -= l;
4288 buf += l;
4289 addr += l;
4290 }
4291 return 0;
4292}
a68fe89c 4293#endif
13eb76e0 4294
2e70f6ef
PB
4295/* in deterministic execution mode, instructions doing device I/Os
4296 must be at the end of the TB */
4297void cpu_io_recompile(CPUState *env, void *retaddr)
4298{
4299 TranslationBlock *tb;
4300 uint32_t n, cflags;
4301 target_ulong pc, cs_base;
4302 uint64_t flags;
4303
4304 tb = tb_find_pc((unsigned long)retaddr);
4305 if (!tb) {
4306 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4307 retaddr);
4308 }
4309 n = env->icount_decr.u16.low + tb->icount;
4310 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4311 /* Calculate how many instructions had been executed before the fault
bf20dc07 4312 occurred. */
2e70f6ef
PB
4313 n = n - env->icount_decr.u16.low;
4314 /* Generate a new TB ending on the I/O insn. */
4315 n++;
4316 /* On MIPS and SH, delay slot instructions can only be restarted if
4317 they were already the first instruction in the TB. If this is not
bf20dc07 4318 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4319 branch. */
4320#if defined(TARGET_MIPS)
4321 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4322 env->active_tc.PC -= 4;
4323 env->icount_decr.u16.low++;
4324 env->hflags &= ~MIPS_HFLAG_BMASK;
4325 }
4326#elif defined(TARGET_SH4)
4327 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4328 && n > 1) {
4329 env->pc -= 2;
4330 env->icount_decr.u16.low++;
4331 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4332 }
4333#endif
4334 /* This should never happen. */
4335 if (n > CF_COUNT_MASK)
4336 cpu_abort(env, "TB too big during recompile");
4337
4338 cflags = n | CF_LAST_IO;
4339 pc = tb->pc;
4340 cs_base = tb->cs_base;
4341 flags = tb->flags;
4342 tb_phys_invalidate(tb, -1);
4343 /* FIXME: In theory this could raise an exception. In practice
4344 we have already translated the block once so it's probably ok. */
4345 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4346 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4347 the first in the TB) then we end up generating a whole new TB and
4348 repeating the fault, which is horribly inefficient.
4349 Better would be to execute just this insn uncached, or generate a
4350 second new TB. */
4351 cpu_resume_from_signal(env, NULL);
4352}
4353
b3755a91
PB
4354#if !defined(CONFIG_USER_ONLY)
4355
055403b2 4356void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4357{
4358 int i, target_code_size, max_target_code_size;
4359 int direct_jmp_count, direct_jmp2_count, cross_page;
4360 TranslationBlock *tb;
3b46e624 4361
e3db7226
FB
4362 target_code_size = 0;
4363 max_target_code_size = 0;
4364 cross_page = 0;
4365 direct_jmp_count = 0;
4366 direct_jmp2_count = 0;
4367 for(i = 0; i < nb_tbs; i++) {
4368 tb = &tbs[i];
4369 target_code_size += tb->size;
4370 if (tb->size > max_target_code_size)
4371 max_target_code_size = tb->size;
4372 if (tb->page_addr[1] != -1)
4373 cross_page++;
4374 if (tb->tb_next_offset[0] != 0xffff) {
4375 direct_jmp_count++;
4376 if (tb->tb_next_offset[1] != 0xffff) {
4377 direct_jmp2_count++;
4378 }
4379 }
4380 }
4381 /* XXX: avoid using doubles ? */
57fec1fe 4382 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4383 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4384 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4385 cpu_fprintf(f, "TB count %d/%d\n",
4386 nb_tbs, code_gen_max_blocks);
5fafdf24 4387 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4388 nb_tbs ? target_code_size / nb_tbs : 0,
4389 max_target_code_size);
055403b2 4390 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4391 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4392 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4393 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4394 cross_page,
e3db7226
FB
4395 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4396 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4397 direct_jmp_count,
e3db7226
FB
4398 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4399 direct_jmp2_count,
4400 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4401 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4402 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4403 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4404 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4405 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4406}
4407
61382a50
FB
4408#define MMUSUFFIX _cmmu
4409#define GETPC() NULL
4410#define env cpu_single_env
b769d8fe 4411#define SOFTMMU_CODE_ACCESS
61382a50
FB
4412
4413#define SHIFT 0
4414#include "softmmu_template.h"
4415
4416#define SHIFT 1
4417#include "softmmu_template.h"
4418
4419#define SHIFT 2
4420#include "softmmu_template.h"
4421
4422#define SHIFT 3
4423#include "softmmu_template.h"
4424
4425#undef env
4426
4427#endif