/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment.  */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

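/* Worked example (illustrative, not from the original source): with
   L1_MAP_ADDR_SPACE_BITS == 32 and TARGET_PAGE_BITS == 12,
   V_L1_BITS_REM = (32 - 12) % 10 = 0, which is < 4, so
   V_L1_BITS = 0 + 10 = 10 and V_L1_SHIFT = 32 - 12 - 10 = 10.
   A virtual page number is then resolved with one 10-bit L1 index
   followed by one 10-bit bottom-level index, like a classic
   two-level page table. */
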
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

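/* Illustrative example (not part of the original source): with a 4 KiB
   host page size, map_exec(addr = 0x12345, size = 0x100) rounds start
   down to 0x12000 and end up to 0x13000, so the whole containing page
   range is re-protected read/write/execute. */
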
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

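/* Illustrative walk (not in the original source): with V_L1_SHIFT == 10
   and L2_BITS == 10, looking up page index 0x12345 first selects L1 slot
   (0x12345 >> 10) & 0x3ff = 0x48, then returns entry 0x12345 & 0x3ff =
   0x345 of the PageDesc array found (or allocated) there. */
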
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
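
/* Usage sketch (illustrative, not from the original source):
 *
 *     cpu_exec_init_all(0);          // 0 selects the default buffer size
 *     env = cpu_init(cpu_model);     // each vCPU is then registered via cpu_exec_init()
 *
 * With tb_size == 0 the buffer defaults to DEFAULT_CODE_GEN_BUFFER_SIZE in
 * user mode and to ram_size / 4 in system mode (see code_gen_alloc above). */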
580
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

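/* Usage sketch (illustrative): qemu_get_cpu(1) walks the first_cpu list and
   returns the CPUState registered with cpu_index == 1, or NULL if no such
   CPU has been created by cpu_exec_init() yet. */
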
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

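/* Note (illustrative, not in the original source): allocation is a simple
   bump pointer over the preallocated tbs[] array, so tb_free() can only
   reclaim the most recently generated TB; anything older is reclaimed
   wholesale when tb_flush() resets nb_tbs and code_gen_ptr. */
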
9fa3e853
FB
681static inline void invalidate_page_bitmap(PageDesc *p)
682{
683 if (p->code_bitmap) {
59817ccb 684 qemu_free(p->code_bitmap);
9fa3e853
FB
685 p->code_bitmap = NULL;
686 }
687 p->code_write_count = 0;
688}
689
5cd2c5b6
RH
690/* Set to NULL all the 'first_tb' fields in all PageDescs. */
691
692static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 693{
5cd2c5b6 694 int i;
fd6ce8f6 695
5cd2c5b6
RH
696 if (*lp == NULL) {
697 return;
698 }
699 if (level == 0) {
700 PageDesc *pd = *lp;
7296abac 701 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
702 pd[i].first_tb = NULL;
703 invalidate_page_bitmap(pd + i);
fd6ce8f6 704 }
5cd2c5b6
RH
705 } else {
706 void **pp = *lp;
7296abac 707 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
708 page_flush_tb_1 (level - 1, pp + i);
709 }
710 }
711}
712
713static void page_flush_tb(void)
714{
715 int i;
716 for (i = 0; i < V_L1_SIZE; i++) {
717 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
718 }
719}
720
721/* flush all the translation blocks */
d4e8164f 722/* XXX: tb_flush is currently not thread safe */
6a00d601 723void tb_flush(CPUState *env1)
fd6ce8f6 724{
6a00d601 725 CPUState *env;
0124311e 726#if defined(DEBUG_FLUSH)
ab3d1727
BS
727 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
728 (unsigned long)(code_gen_ptr - code_gen_buffer),
729 nb_tbs, nb_tbs > 0 ?
730 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 731#endif
26a5f13b 732 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
733 cpu_abort(env1, "Internal error: code buffer overflow\n");
734
fd6ce8f6 735 nb_tbs = 0;
3b46e624 736
6a00d601
FB
737 for(env = first_cpu; env != NULL; env = env->next_cpu) {
738 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
739 }
9fa3e853 740
8a8a608f 741 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 742 page_flush_tb();
9fa3e853 743
fd6ce8f6 744 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
745 /* XXX: flush processor icache at this point if cache flush is
746 expensive */
e3db7226 747 tb_flush_count++;
fd6ce8f6
FB
748}
749
750#ifdef DEBUG_TB_CHECK
751
bc98a7ef 752static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
753{
754 TranslationBlock *tb;
755 int i;
756 address &= TARGET_PAGE_MASK;
99773bd4
PB
757 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
758 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
759 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
760 address >= tb->pc + tb->size)) {
0bf9e31a
BS
761 printf("ERROR invalidate: address=" TARGET_FMT_lx
762 " PC=%08lx size=%04x\n",
99773bd4 763 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
764 }
765 }
766 }
767}
768
769/* verify that all the pages have correct rights for code */
770static void tb_page_check(void)
771{
772 TranslationBlock *tb;
773 int i, flags1, flags2;
3b46e624 774
99773bd4
PB
775 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
776 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
777 flags1 = page_get_flags(tb->pc);
778 flags2 = page_get_flags(tb->pc + tb->size - 1);
779 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
780 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 781 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
782 }
783 }
784 }
785}
786
787#endif
788
789/* invalidate one TB */
790static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
791 int next_offset)
792{
793 TranslationBlock *tb1;
794 for(;;) {
795 tb1 = *ptb;
796 if (tb1 == tb) {
797 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
798 break;
799 }
800 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
801 }
802}
803
9fa3e853
FB
804static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
805{
806 TranslationBlock *tb1;
807 unsigned int n1;
808
809 for(;;) {
810 tb1 = *ptb;
811 n1 = (long)tb1 & 3;
812 tb1 = (TranslationBlock *)((long)tb1 & ~3);
813 if (tb1 == tb) {
814 *ptb = tb1->page_next[n1];
815 break;
816 }
817 ptb = &tb1->page_next[n1];
818 }
819}
820
d4e8164f
FB
821static inline void tb_jmp_remove(TranslationBlock *tb, int n)
822{
823 TranslationBlock *tb1, **ptb;
824 unsigned int n1;
825
826 ptb = &tb->jmp_next[n];
827 tb1 = *ptb;
828 if (tb1) {
829 /* find tb(n) in circular list */
830 for(;;) {
831 tb1 = *ptb;
832 n1 = (long)tb1 & 3;
833 tb1 = (TranslationBlock *)((long)tb1 & ~3);
834 if (n1 == n && tb1 == tb)
835 break;
836 if (n1 == 2) {
837 ptb = &tb1->jmp_first;
838 } else {
839 ptb = &tb1->jmp_next[n1];
840 }
841 }
842 /* now we can suppress tb(n) from the list */
843 *ptb = tb->jmp_next[n];
844
845 tb->jmp_next[n] = NULL;
846 }
847}
848
849/* reset the jump entry 'n' of a TB so that it is not chained to
850 another TB */
851static inline void tb_reset_jump(TranslationBlock *tb, int n)
852{
853 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
854}
855
41c1b1c9 856void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 857{
6a00d601 858 CPUState *env;
8a40a180 859 PageDesc *p;
d4e8164f 860 unsigned int h, n1;
41c1b1c9 861 tb_page_addr_t phys_pc;
8a40a180 862 TranslationBlock *tb1, *tb2;
3b46e624 863
8a40a180
FB
864 /* remove the TB from the hash list */
865 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
866 h = tb_phys_hash_func(phys_pc);
5fafdf24 867 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
868 offsetof(TranslationBlock, phys_hash_next));
869
870 /* remove the TB from the page list */
871 if (tb->page_addr[0] != page_addr) {
872 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
873 tb_page_remove(&p->first_tb, tb);
874 invalidate_page_bitmap(p);
875 }
876 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
877 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
878 tb_page_remove(&p->first_tb, tb);
879 invalidate_page_bitmap(p);
880 }
881
36bdbe54 882 tb_invalidated_flag = 1;
59817ccb 883
fd6ce8f6 884 /* remove the TB from the hash list */
8a40a180 885 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
886 for(env = first_cpu; env != NULL; env = env->next_cpu) {
887 if (env->tb_jmp_cache[h] == tb)
888 env->tb_jmp_cache[h] = NULL;
889 }
d4e8164f
FB
890
891 /* suppress this TB from the two jump lists */
892 tb_jmp_remove(tb, 0);
893 tb_jmp_remove(tb, 1);
894
895 /* suppress any remaining jumps to this TB */
896 tb1 = tb->jmp_first;
897 for(;;) {
898 n1 = (long)tb1 & 3;
899 if (n1 == 2)
900 break;
901 tb1 = (TranslationBlock *)((long)tb1 & ~3);
902 tb2 = tb1->jmp_next[n1];
903 tb_reset_jump(tb1, n1);
904 tb1->jmp_next[n1] = NULL;
905 tb1 = tb2;
906 }
907 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 908
e3db7226 909 tb_phys_invalidate_count++;
9fa3e853
FB
910}
911
912static inline void set_bits(uint8_t *tab, int start, int len)
913{
914 int end, mask, end1;
915
916 end = start + len;
917 tab += start >> 3;
918 mask = 0xff << (start & 7);
919 if ((start & ~7) == (end & ~7)) {
920 if (start < end) {
921 mask &= ~(0xff << (end & 7));
922 *tab |= mask;
923 }
924 } else {
925 *tab++ |= mask;
926 start = (start + 8) & ~7;
927 end1 = end & ~7;
928 while (start < end1) {
929 *tab++ = 0xff;
930 start += 8;
931 }
932 if (start < end) {
933 mask = ~(0xff << (end & 7));
934 *tab |= mask;
935 }
936 }
937}
938
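/* Worked example (illustrative): set_bits(tab, 3, 7) covers bits 3..9.
   The first byte gets mask 0xff << 3 = 0xf8 (bits 3-7), then start is
   rounded up to 8 and the second byte gets ~(0xff << 2) = 0x03
   (bits 8-9). */
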
939static void build_page_bitmap(PageDesc *p)
940{
941 int n, tb_start, tb_end;
942 TranslationBlock *tb;
3b46e624 943
b2a7081a 944 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
945
946 tb = p->first_tb;
947 while (tb != NULL) {
948 n = (long)tb & 3;
949 tb = (TranslationBlock *)((long)tb & ~3);
950 /* NOTE: this is subtle as a TB may span two physical pages */
951 if (n == 0) {
952 /* NOTE: tb_end may be after the end of the page, but
953 it is not a problem */
954 tb_start = tb->pc & ~TARGET_PAGE_MASK;
955 tb_end = tb_start + tb->size;
956 if (tb_end > TARGET_PAGE_SIZE)
957 tb_end = TARGET_PAGE_SIZE;
958 } else {
959 tb_start = 0;
960 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
961 }
962 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
963 tb = tb->page_next[n];
964 }
965}
966
2e70f6ef
PB
967TranslationBlock *tb_gen_code(CPUState *env,
968 target_ulong pc, target_ulong cs_base,
969 int flags, int cflags)
d720b93d
FB
970{
971 TranslationBlock *tb;
972 uint8_t *tc_ptr;
41c1b1c9
PB
973 tb_page_addr_t phys_pc, phys_page2;
974 target_ulong virt_page2;
d720b93d
FB
975 int code_gen_size;
976
41c1b1c9 977 phys_pc = get_page_addr_code(env, pc);
c27004ec 978 tb = tb_alloc(pc);
d720b93d
FB
979 if (!tb) {
980 /* flush must be done */
981 tb_flush(env);
982 /* cannot fail at this point */
c27004ec 983 tb = tb_alloc(pc);
2e70f6ef
PB
984 /* Don't forget to invalidate previous TB info. */
985 tb_invalidated_flag = 1;
d720b93d
FB
986 }
987 tc_ptr = code_gen_ptr;
988 tb->tc_ptr = tc_ptr;
989 tb->cs_base = cs_base;
990 tb->flags = flags;
991 tb->cflags = cflags;
d07bde88 992 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 993 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 994
d720b93d 995 /* check next page if needed */
c27004ec 996 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 997 phys_page2 = -1;
c27004ec 998 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 999 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1000 }
41c1b1c9 1001 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1002 return tb;
d720b93d 1003}
3b46e624 1004
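/* Note (illustrative): tb_gen_code() computes virt_page2 from
   pc + tb->size - 1, so a block whose code crosses a guest page boundary
   records both physical pages and tb_link_page() links it into the TB
   list of each page; invalidating either page then removes the TB. */
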
9fa3e853
FB
1005/* invalidate all TBs which intersect with the target physical page
1006 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1007 the same physical page. 'is_cpu_write_access' should be true if called
1008 from a real cpu write access: the virtual CPU will exit the current
1009 TB if code is modified inside this TB. */
41c1b1c9 1010void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1011 int is_cpu_write_access)
1012{
6b917547 1013 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1014 CPUState *env = cpu_single_env;
41c1b1c9 1015 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1016 PageDesc *p;
1017 int n;
1018#ifdef TARGET_HAS_PRECISE_SMC
1019 int current_tb_not_found = is_cpu_write_access;
1020 TranslationBlock *current_tb = NULL;
1021 int current_tb_modified = 0;
1022 target_ulong current_pc = 0;
1023 target_ulong current_cs_base = 0;
1024 int current_flags = 0;
1025#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1026
1027 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1028 if (!p)
9fa3e853 1029 return;
5fafdf24 1030 if (!p->code_bitmap &&
d720b93d
FB
1031 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1032 is_cpu_write_access) {
9fa3e853
FB
1033 /* build code bitmap */
1034 build_page_bitmap(p);
1035 }
1036
1037 /* we remove all the TBs in the range [start, end[ */
1038 /* XXX: see if in some cases it could be faster to invalidate all the code */
1039 tb = p->first_tb;
1040 while (tb != NULL) {
1041 n = (long)tb & 3;
1042 tb = (TranslationBlock *)((long)tb & ~3);
1043 tb_next = tb->page_next[n];
1044 /* NOTE: this is subtle as a TB may span two physical pages */
1045 if (n == 0) {
1046 /* NOTE: tb_end may be after the end of the page, but
1047 it is not a problem */
1048 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1049 tb_end = tb_start + tb->size;
1050 } else {
1051 tb_start = tb->page_addr[1];
1052 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1053 }
1054 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1055#ifdef TARGET_HAS_PRECISE_SMC
1056 if (current_tb_not_found) {
1057 current_tb_not_found = 0;
1058 current_tb = NULL;
2e70f6ef 1059 if (env->mem_io_pc) {
d720b93d 1060 /* now we have a real cpu fault */
2e70f6ef 1061 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1062 }
1063 }
1064 if (current_tb == tb &&
2e70f6ef 1065 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1066 /* If we are modifying the current TB, we must stop
1067 its execution. We could be more precise by checking
1068 that the modification is after the current PC, but it
1069 would require a specialized function to partially
1070 restore the CPU state */
3b46e624 1071
d720b93d 1072 current_tb_modified = 1;
5fafdf24 1073 cpu_restore_state(current_tb, env,
2e70f6ef 1074 env->mem_io_pc, NULL);
6b917547
AL
1075 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1076 &current_flags);
d720b93d
FB
1077 }
1078#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1079 /* we need to do that to handle the case where a signal
1080 occurs while doing tb_phys_invalidate() */
1081 saved_tb = NULL;
1082 if (env) {
1083 saved_tb = env->current_tb;
1084 env->current_tb = NULL;
1085 }
9fa3e853 1086 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1087 if (env) {
1088 env->current_tb = saved_tb;
1089 if (env->interrupt_request && env->current_tb)
1090 cpu_interrupt(env, env->interrupt_request);
1091 }
9fa3e853
FB
1092 }
1093 tb = tb_next;
1094 }
1095#if !defined(CONFIG_USER_ONLY)
1096 /* if no code remaining, no need to continue to use slow writes */
1097 if (!p->first_tb) {
1098 invalidate_page_bitmap(p);
d720b93d 1099 if (is_cpu_write_access) {
2e70f6ef 1100 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1101 }
1102 }
1103#endif
1104#ifdef TARGET_HAS_PRECISE_SMC
1105 if (current_tb_modified) {
1106 /* we generate a block containing just the instruction
1107 modifying the memory. It will ensure that it cannot modify
1108 itself */
ea1c1802 1109 env->current_tb = NULL;
2e70f6ef 1110 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1111 cpu_resume_from_signal(env, NULL);
9fa3e853 1112 }
fd6ce8f6 1113#endif
9fa3e853 1114}
fd6ce8f6 1115
9fa3e853 1116/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1117static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1118{
1119 PageDesc *p;
1120 int offset, b;
59817ccb 1121#if 0
a4193c8a 1122 if (1) {
93fcfe39
AL
1123 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1124 cpu_single_env->mem_io_vaddr, len,
1125 cpu_single_env->eip,
1126 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1127 }
1128#endif
9fa3e853 1129 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1130 if (!p)
9fa3e853
FB
1131 return;
1132 if (p->code_bitmap) {
1133 offset = start & ~TARGET_PAGE_MASK;
1134 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1135 if (b & ((1 << len) - 1))
1136 goto do_invalidate;
1137 } else {
1138 do_invalidate:
d720b93d 1139 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1140 }
1141}
1142
9fa3e853 1143#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1144static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1145 unsigned long pc, void *puc)
9fa3e853 1146{
6b917547 1147 TranslationBlock *tb;
9fa3e853 1148 PageDesc *p;
6b917547 1149 int n;
d720b93d 1150#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1151 TranslationBlock *current_tb = NULL;
d720b93d 1152 CPUState *env = cpu_single_env;
6b917547
AL
1153 int current_tb_modified = 0;
1154 target_ulong current_pc = 0;
1155 target_ulong current_cs_base = 0;
1156 int current_flags = 0;
d720b93d 1157#endif
9fa3e853
FB
1158
1159 addr &= TARGET_PAGE_MASK;
1160 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1161 if (!p)
9fa3e853
FB
1162 return;
1163 tb = p->first_tb;
d720b93d
FB
1164#ifdef TARGET_HAS_PRECISE_SMC
1165 if (tb && pc != 0) {
1166 current_tb = tb_find_pc(pc);
1167 }
1168#endif
9fa3e853
FB
1169 while (tb != NULL) {
1170 n = (long)tb & 3;
1171 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1172#ifdef TARGET_HAS_PRECISE_SMC
1173 if (current_tb == tb &&
2e70f6ef 1174 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1175 /* If we are modifying the current TB, we must stop
1176 its execution. We could be more precise by checking
1177 that the modification is after the current PC, but it
1178 would require a specialized function to partially
1179 restore the CPU state */
3b46e624 1180
d720b93d
FB
1181 current_tb_modified = 1;
1182 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1183 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1184 &current_flags);
d720b93d
FB
1185 }
1186#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1187 tb_phys_invalidate(tb, addr);
1188 tb = tb->page_next[n];
1189 }
fd6ce8f6 1190 p->first_tb = NULL;
d720b93d
FB
1191#ifdef TARGET_HAS_PRECISE_SMC
1192 if (current_tb_modified) {
1193 /* we generate a block containing just the instruction
1194 modifying the memory. It will ensure that it cannot modify
1195 itself */
ea1c1802 1196 env->current_tb = NULL;
2e70f6ef 1197 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1198 cpu_resume_from_signal(env, puc);
1199 }
1200#endif
fd6ce8f6 1201}
9fa3e853 1202#endif
fd6ce8f6
FB
1203
1204/* add the tb in the target page and protect it if necessary */
5fafdf24 1205static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1206 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1207{
1208 PageDesc *p;
9fa3e853
FB
1209 TranslationBlock *last_first_tb;
1210
1211 tb->page_addr[n] = page_addr;
5cd2c5b6 1212 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853
FB
1213 tb->page_next[n] = p->first_tb;
1214 last_first_tb = p->first_tb;
1215 p->first_tb = (TranslationBlock *)((long)tb | n);
1216 invalidate_page_bitmap(p);
fd6ce8f6 1217
107db443 1218#if defined(TARGET_HAS_SMC) || 1
d720b93d 1219
9fa3e853 1220#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1221 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1222 target_ulong addr;
1223 PageDesc *p2;
9fa3e853
FB
1224 int prot;
1225
fd6ce8f6
FB
1226 /* force the host page as non writable (writes will have a
1227 page fault + mprotect overhead) */
53a5960a 1228 page_addr &= qemu_host_page_mask;
fd6ce8f6 1229 prot = 0;
53a5960a
PB
1230 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1231 addr += TARGET_PAGE_SIZE) {
1232
1233 p2 = page_find (addr >> TARGET_PAGE_BITS);
1234 if (!p2)
1235 continue;
1236 prot |= p2->flags;
1237 p2->flags &= ~PAGE_WRITE;
53a5960a 1238 }
5fafdf24 1239 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1240 (prot & PAGE_BITS) & ~PAGE_WRITE);
1241#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1242 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1243 page_addr);
fd6ce8f6 1244#endif
fd6ce8f6 1245 }
9fa3e853
FB
1246#else
1247 /* if some code is already present, then the pages are already
1248 protected. So we handle the case where only the first TB is
1249 allocated in a physical page */
1250 if (!last_first_tb) {
6a00d601 1251 tlb_protect_code(page_addr);
9fa3e853
FB
1252 }
1253#endif
d720b93d
FB
1254
1255#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1256}
1257
9fa3e853
FB
1258/* add a new TB and link it to the physical page tables. phys_page2 is
1259 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1260void tb_link_page(TranslationBlock *tb,
1261 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1262{
9fa3e853
FB
1263 unsigned int h;
1264 TranslationBlock **ptb;
1265
c8a706fe
PB
1266 /* Grab the mmap lock to stop another thread invalidating this TB
1267 before we are done. */
1268 mmap_lock();
9fa3e853
FB
1269 /* add in the physical hash table */
1270 h = tb_phys_hash_func(phys_pc);
1271 ptb = &tb_phys_hash[h];
1272 tb->phys_hash_next = *ptb;
1273 *ptb = tb;
fd6ce8f6
FB
1274
1275 /* add in the page list */
9fa3e853
FB
1276 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1277 if (phys_page2 != -1)
1278 tb_alloc_page(tb, 1, phys_page2);
1279 else
1280 tb->page_addr[1] = -1;
9fa3e853 1281
d4e8164f
FB
1282 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1283 tb->jmp_next[0] = NULL;
1284 tb->jmp_next[1] = NULL;
1285
1286 /* init original jump addresses */
1287 if (tb->tb_next_offset[0] != 0xffff)
1288 tb_reset_jump(tb, 0);
1289 if (tb->tb_next_offset[1] != 0xffff)
1290 tb_reset_jump(tb, 1);
8a40a180
FB
1291
1292#ifdef DEBUG_TB_CHECK
1293 tb_page_check();
1294#endif
c8a706fe 1295 mmap_unlock();
fd6ce8f6
FB
1296}
1297
9fa3e853
FB
1298/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1299 tb[1].tc_ptr. Return NULL if not found */
1300TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1301{
9fa3e853
FB
1302 int m_min, m_max, m;
1303 unsigned long v;
1304 TranslationBlock *tb;
a513fe19
FB
1305
1306 if (nb_tbs <= 0)
1307 return NULL;
1308 if (tc_ptr < (unsigned long)code_gen_buffer ||
1309 tc_ptr >= (unsigned long)code_gen_ptr)
1310 return NULL;
1311 /* binary search (cf Knuth) */
1312 m_min = 0;
1313 m_max = nb_tbs - 1;
1314 while (m_min <= m_max) {
1315 m = (m_min + m_max) >> 1;
1316 tb = &tbs[m];
1317 v = (unsigned long)tb->tc_ptr;
1318 if (v == tc_ptr)
1319 return tb;
1320 else if (tc_ptr < v) {
1321 m_max = m - 1;
1322 } else {
1323 m_min = m + 1;
1324 }
5fafdf24 1325 }
a513fe19
FB
1326 return &tbs[m_max];
1327}
7501267e 1328
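/* Note (illustrative): the binary search works because tbs[] is filled in
   allocation order and code_gen_ptr only grows between flushes, so tc_ptr
   values are monotonically increasing; the search returns the TB whose
   generated code region contains tc_ptr. */
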
ea041c0e
FB
1329static void tb_reset_jump_recursive(TranslationBlock *tb);
1330
1331static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1332{
1333 TranslationBlock *tb1, *tb_next, **ptb;
1334 unsigned int n1;
1335
1336 tb1 = tb->jmp_next[n];
1337 if (tb1 != NULL) {
1338 /* find head of list */
1339 for(;;) {
1340 n1 = (long)tb1 & 3;
1341 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1342 if (n1 == 2)
1343 break;
1344 tb1 = tb1->jmp_next[n1];
1345 }
1346 /* we are now sure now that tb jumps to tb1 */
1347 tb_next = tb1;
1348
1349 /* remove tb from the jmp_first list */
1350 ptb = &tb_next->jmp_first;
1351 for(;;) {
1352 tb1 = *ptb;
1353 n1 = (long)tb1 & 3;
1354 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1355 if (n1 == n && tb1 == tb)
1356 break;
1357 ptb = &tb1->jmp_next[n1];
1358 }
1359 *ptb = tb->jmp_next[n];
1360 tb->jmp_next[n] = NULL;
3b46e624 1361
ea041c0e
FB
1362 /* suppress the jump to next tb in generated code */
1363 tb_reset_jump(tb, n);
1364
0124311e 1365 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1366 tb_reset_jump_recursive(tb_next);
1367 }
1368}
1369
1370static void tb_reset_jump_recursive(TranslationBlock *tb)
1371{
1372 tb_reset_jump_recursive2(tb, 0);
1373 tb_reset_jump_recursive2(tb, 1);
1374}
1375
1fddef4b 1376#if defined(TARGET_HAS_ICE)
94df27fd
PB
1377#if defined(CONFIG_USER_ONLY)
1378static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1379{
1380 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1381}
1382#else
d720b93d
FB
1383static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384{
c227f099 1385 target_phys_addr_t addr;
9b3c35e0 1386 target_ulong pd;
c227f099 1387 ram_addr_t ram_addr;
c2f07f81 1388 PhysPageDesc *p;
d720b93d 1389
c2f07f81
PB
1390 addr = cpu_get_phys_page_debug(env, pc);
1391 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1392 if (!p) {
1393 pd = IO_MEM_UNASSIGNED;
1394 } else {
1395 pd = p->phys_offset;
1396 }
1397 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1398 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1399}
c27004ec 1400#endif
94df27fd 1401#endif /* TARGET_HAS_ICE */
d720b93d 1402
c527ee8f
PB
1403#if defined(CONFIG_USER_ONLY)
1404void cpu_watchpoint_remove_all(CPUState *env, int mask)
1405
1406{
1407}
1408
1409int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1410 int flags, CPUWatchpoint **watchpoint)
1411{
1412 return -ENOSYS;
1413}
1414#else
6658ffb8 1415/* Add a watchpoint. */
a1d1bb31
AL
1416int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1417 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1418{
b4051334 1419 target_ulong len_mask = ~(len - 1);
c0ce998e 1420 CPUWatchpoint *wp;
6658ffb8 1421
b4051334
AL
1422 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1423 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1424 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1425 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1426 return -EINVAL;
1427 }
a1d1bb31 1428 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1429
1430 wp->vaddr = addr;
b4051334 1431 wp->len_mask = len_mask;
a1d1bb31
AL
1432 wp->flags = flags;
1433
2dc9f411 1434 /* keep all GDB-injected watchpoints in front */
c0ce998e 1435 if (flags & BP_GDB)
72cf2d4f 1436 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1437 else
72cf2d4f 1438 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1439
6658ffb8 1440 tlb_flush_page(env, addr);
a1d1bb31
AL
1441
1442 if (watchpoint)
1443 *watchpoint = wp;
1444 return 0;
6658ffb8
PB
1445}
1446
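/* Worked example (illustrative): for len = 4, len_mask = ~3, so
   cpu_watchpoint_insert(env, 0x1004, 4, ...) passes the alignment check
   (0x1004 & 3 == 0) while addr 0x1003 is rejected with -EINVAL because
   it is not aligned to its length. */
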
a1d1bb31
AL
1447/* Remove a specific watchpoint. */
1448int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1449 int flags)
6658ffb8 1450{
b4051334 1451 target_ulong len_mask = ~(len - 1);
a1d1bb31 1452 CPUWatchpoint *wp;
6658ffb8 1453
72cf2d4f 1454 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1455 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1456 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1457 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1458 return 0;
1459 }
1460 }
a1d1bb31 1461 return -ENOENT;
6658ffb8
PB
1462}
1463
a1d1bb31
AL
1464/* Remove a specific watchpoint by reference. */
1465void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1466{
72cf2d4f 1467 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1468
a1d1bb31
AL
1469 tlb_flush_page(env, watchpoint->vaddr);
1470
1471 qemu_free(watchpoint);
1472}
1473
1474/* Remove all matching watchpoints. */
1475void cpu_watchpoint_remove_all(CPUState *env, int mask)
1476{
c0ce998e 1477 CPUWatchpoint *wp, *next;
a1d1bb31 1478
72cf2d4f 1479 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1480 if (wp->flags & mask)
1481 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1482 }
7d03f82f 1483}
c527ee8f 1484#endif
7d03f82f 1485
a1d1bb31
AL
1486/* Add a breakpoint. */
1487int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1488 CPUBreakpoint **breakpoint)
4c3a88a2 1489{
1fddef4b 1490#if defined(TARGET_HAS_ICE)
c0ce998e 1491 CPUBreakpoint *bp;
3b46e624 1492
a1d1bb31 1493 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1494
a1d1bb31
AL
1495 bp->pc = pc;
1496 bp->flags = flags;
1497
2dc9f411 1498 /* keep all GDB-injected breakpoints in front */
c0ce998e 1499 if (flags & BP_GDB)
72cf2d4f 1500 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1501 else
72cf2d4f 1502 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1503
d720b93d 1504 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1505
1506 if (breakpoint)
1507 *breakpoint = bp;
4c3a88a2
FB
1508 return 0;
1509#else
a1d1bb31 1510 return -ENOSYS;
4c3a88a2
FB
1511#endif
1512}
1513
a1d1bb31
AL
1514/* Remove a specific breakpoint. */
1515int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1516{
7d03f82f 1517#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1518 CPUBreakpoint *bp;
1519
72cf2d4f 1520 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1521 if (bp->pc == pc && bp->flags == flags) {
1522 cpu_breakpoint_remove_by_ref(env, bp);
1523 return 0;
1524 }
7d03f82f 1525 }
a1d1bb31
AL
1526 return -ENOENT;
1527#else
1528 return -ENOSYS;
7d03f82f
EI
1529#endif
1530}
1531
a1d1bb31
AL
1532/* Remove a specific breakpoint by reference. */
1533void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1534{
1fddef4b 1535#if defined(TARGET_HAS_ICE)
72cf2d4f 1536 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1537
a1d1bb31
AL
1538 breakpoint_invalidate(env, breakpoint->pc);
1539
1540 qemu_free(breakpoint);
1541#endif
1542}
1543
1544/* Remove all matching breakpoints. */
1545void cpu_breakpoint_remove_all(CPUState *env, int mask)
1546{
1547#if defined(TARGET_HAS_ICE)
c0ce998e 1548 CPUBreakpoint *bp, *next;
a1d1bb31 1549
72cf2d4f 1550 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1551 if (bp->flags & mask)
1552 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1553 }
4c3a88a2
FB
1554#endif
1555}
1556
c33a346e
FB
1557/* enable or disable single step mode. EXCP_DEBUG is returned by the
1558 CPU loop after each instruction */
1559void cpu_single_step(CPUState *env, int enabled)
1560{
1fddef4b 1561#if defined(TARGET_HAS_ICE)
c33a346e
FB
1562 if (env->singlestep_enabled != enabled) {
1563 env->singlestep_enabled = enabled;
e22a25c9
AL
1564 if (kvm_enabled())
1565 kvm_update_guest_debug(env, 0);
1566 else {
ccbb4d44 1567 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1568 /* XXX: only flush what is necessary */
1569 tb_flush(env);
1570 }
c33a346e
FB
1571 }
1572#endif
1573}
1574
34865134
FB
1575/* enable or disable low levels log */
1576void cpu_set_log(int log_flags)
1577{
1578 loglevel = log_flags;
1579 if (loglevel && !logfile) {
11fcfab4 1580 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1581 if (!logfile) {
1582 perror(logfilename);
1583 _exit(1);
1584 }
9fa3e853
FB
1585#if !defined(CONFIG_SOFTMMU)
1586 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1587 {
b55266b5 1588 static char logfile_buf[4096];
9fa3e853
FB
1589 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1590 }
bf65f53f
FN
1591#elif !defined(_WIN32)
1592 /* Win32 doesn't support line-buffering and requires size >= 2 */
34865134 1593 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1594#endif
e735b91c
PB
1595 log_append = 1;
1596 }
1597 if (!loglevel && logfile) {
1598 fclose(logfile);
1599 logfile = NULL;
34865134
FB
1600 }
1601}
1602
1603void cpu_set_log_filename(const char *filename)
1604{
1605 logfilename = strdup(filename);
e735b91c
PB
1606 if (logfile) {
1607 fclose(logfile);
1608 logfile = NULL;
1609 }
1610 cpu_set_log(loglevel);
34865134 1611}
c33a346e 1612
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

1633/* mask must never be zero, except for A20 change call */
1634void cpu_interrupt(CPUState *env, int mask)
1635{
1636 int old_mask;
be214e6c 1637
2e70f6ef 1638 old_mask = env->interrupt_request;
68a79315 1639 env->interrupt_request |= mask;
3098dba0 1640
8edac960
AL
1641#ifndef CONFIG_USER_ONLY
1642 /*
1643 * If called from iothread context, wake the target cpu in
1644 * case it is halted.
1645 */
b7680cb6 1646 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1647 qemu_cpu_kick(env);
1648 return;
1649 }
1650#endif
1651
2e70f6ef 1652 if (use_icount) {
266910c4 1653 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1654#ifndef CONFIG_USER_ONLY
2e70f6ef 1655 if (!can_do_io(env)
be214e6c 1656 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1657 cpu_abort(env, "Raised interrupt while not in I/O function");
1658 }
1659#endif
1660 } else {
3098dba0 1661 cpu_unlink_tb(env);
ea041c0e
FB
1662 }
1663}
1664
b54ad049
FB
1665void cpu_reset_interrupt(CPUState *env, int mask)
1666{
1667 env->interrupt_request &= ~mask;
1668}
1669
3098dba0
AJ
1670void cpu_exit(CPUState *env)
1671{
1672 env->exit_request = 1;
1673 cpu_unlink_tb(env);
1674}
1675
c7cd6a37 1676const CPULogItem cpu_log_items[] = {
5fafdf24 1677 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1678 "show generated host assembly code for each compiled TB" },
1679 { CPU_LOG_TB_IN_ASM, "in_asm",
1680 "show target assembly code for each compiled TB" },
5fafdf24 1681 { CPU_LOG_TB_OP, "op",
57fec1fe 1682 "show micro ops for each compiled TB" },
f193c797 1683 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1684 "show micro ops "
1685#ifdef TARGET_I386
1686 "before eflags optimization and "
f193c797 1687#endif
e01a1157 1688 "after liveness analysis" },
f193c797
FB
1689 { CPU_LOG_INT, "int",
1690 "show interrupts/exceptions in short format" },
1691 { CPU_LOG_EXEC, "exec",
1692 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1693 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1694 "show CPU state before block translation" },
f193c797
FB
1695#ifdef TARGET_I386
1696 { CPU_LOG_PCALL, "pcall",
1697 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1698 { CPU_LOG_RESET, "cpu_reset",
1699 "show CPU state before CPU resets" },
f193c797 1700#endif
8e3a9fd2 1701#ifdef DEBUG_IOPORT
fd872598
FB
1702 { CPU_LOG_IOPORT, "ioport",
1703 "show all i/o ports accesses" },
8e3a9fd2 1704#endif
f193c797
FB
1705 { 0, NULL, NULL },
1706};
1707
f6f3fbca
MT
1708#ifndef CONFIG_USER_ONLY
1709static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 = QLIST_HEAD_INITIALIZER(memory_client_list);
1711
1712static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26
YT
1713 ram_addr_t size,
1714 ram_addr_t phys_offset)
f6f3fbca
MT
1715{
1716 CPUPhysMemoryClient *client;
1717 QLIST_FOREACH(client, &memory_client_list, list) {
1718 client->set_memory(client, start_addr, size, phys_offset);
1719 }
1720}
1721
1722static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1723 target_phys_addr_t end)
f6f3fbca
MT
1724{
1725 CPUPhysMemoryClient *client;
1726 QLIST_FOREACH(client, &memory_client_list, list) {
1727 int r = client->sync_dirty_bitmap(client, start, end);
1728 if (r < 0)
1729 return r;
1730 }
1731 return 0;
1732}
1733
1734static int cpu_notify_migration_log(int enable)
1735{
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->migration_log(client, enable);
1739 if (r < 0)
1740 return r;
1741 }
1742 return 0;
1743}
1744
5cd2c5b6
RH
1745static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1746 int level, void **lp)
f6f3fbca 1747{
5cd2c5b6 1748 int i;
f6f3fbca 1749
5cd2c5b6
RH
1750 if (*lp == NULL) {
1751 return;
1752 }
1753 if (level == 0) {
1754 PhysPageDesc *pd = *lp;
7296abac 1755 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1756 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1757 client->set_memory(client, pd[i].region_offset,
1758 TARGET_PAGE_SIZE, pd[i].phys_offset);
f6f3fbca 1759 }
5cd2c5b6
RH
1760 }
1761 } else {
1762 void **pp = *lp;
7296abac 1763 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1764 phys_page_for_each_1(client, level - 1, pp + i);
f6f3fbca
MT
1765 }
1766 }
1767}
1768
1769static void phys_page_for_each(CPUPhysMemoryClient *client)
1770{
5cd2c5b6
RH
1771 int i;
1772 for (i = 0; i < P_L1_SIZE; ++i) {
1773 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1774 l1_phys_map + i);
f6f3fbca 1775 }
f6f3fbca
MT
1776}
1777
1778void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1779{
1780 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1781 phys_page_for_each(client);
1782}
1783
1784void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1785{
1786 QLIST_REMOVE(client, list);
1787}
1788#endif
1789
f193c797
FB
1790static int cmp1(const char *s1, int n, const char *s2)
1791{
1792 if (strlen(s2) != n)
1793 return 0;
1794 return memcmp(s1, s2, n) == 0;
1795}
3b46e624 1796
f193c797
FB
1797/* takes a comma separated list of log masks. Return 0 if error. */
1798int cpu_str_to_log_mask(const char *str)
1799{
c7cd6a37 1800 const CPULogItem *item;
f193c797
FB
1801 int mask;
1802 const char *p, *p1;
1803
1804 p = str;
1805 mask = 0;
1806 for(;;) {
1807 p1 = strchr(p, ',');
1808 if (!p1)
1809 p1 = p + strlen(p);
9742bf26
YT
1810 if(cmp1(p,p1-p,"all")) {
1811 for(item = cpu_log_items; item->mask != 0; item++) {
1812 mask |= item->mask;
1813 }
1814 } else {
1815 for(item = cpu_log_items; item->mask != 0; item++) {
1816 if (cmp1(p, p1 - p, item->name))
1817 goto found;
1818 }
1819 return 0;
f193c797 1820 }
f193c797
FB
1821 found:
1822 mask |= item->mask;
1823 if (*p1 != ',')
1824 break;
1825 p = p1 + 1;
1826 }
1827 return mask;
1828}
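/* Illustrative usage (names taken from cpu_log_items above):
 *   cpu_str_to_log_mask("in_asm,cpu") == CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU
 *   cpu_str_to_log_mask("all")        == the OR of every item's mask
 *   cpu_str_to_log_mask("bogus")      == 0 (unknown name -> error)
 */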
ea041c0e 1829
7501267e
FB
1830void cpu_abort(CPUState *env, const char *fmt, ...)
1831{
1832 va_list ap;
493ae1f0 1833 va_list ap2;
7501267e
FB
1834
1835 va_start(ap, fmt);
493ae1f0 1836 va_copy(ap2, ap);
7501267e
FB
1837 fprintf(stderr, "qemu: fatal: ");
1838 vfprintf(stderr, fmt, ap);
1839 fprintf(stderr, "\n");
1840#ifdef TARGET_I386
7fe48483
FB
1841 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1842#else
1843 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1844#endif
93fcfe39
AL
1845 if (qemu_log_enabled()) {
1846 qemu_log("qemu: fatal: ");
1847 qemu_log_vprintf(fmt, ap2);
1848 qemu_log("\n");
f9373291 1849#ifdef TARGET_I386
93fcfe39 1850 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1851#else
93fcfe39 1852 log_cpu_state(env, 0);
f9373291 1853#endif
31b1a7b4 1854 qemu_log_flush();
93fcfe39 1855 qemu_log_close();
924edcae 1856 }
493ae1f0 1857 va_end(ap2);
f9373291 1858 va_end(ap);
fd052bf6
RV
1859#if defined(CONFIG_USER_ONLY)
1860 {
1861 struct sigaction act;
1862 sigfillset(&act.sa_mask);
1863 act.sa_handler = SIG_DFL;
1864 sigaction(SIGABRT, &act, NULL);
1865 }
1866#endif
7501267e
FB
1867 abort();
1868}
1869
c5be9f08
TS
1870CPUState *cpu_copy(CPUState *env)
1871{
01ba9816 1872 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1873 CPUState *next_cpu = new_env->next_cpu;
1874 int cpu_index = new_env->cpu_index;
5a38f081
AL
1875#if defined(TARGET_HAS_ICE)
1876 CPUBreakpoint *bp;
1877 CPUWatchpoint *wp;
1878#endif
1879
c5be9f08 1880 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1881
1882 /* Preserve chaining and index. */
c5be9f08
TS
1883 new_env->next_cpu = next_cpu;
1884 new_env->cpu_index = cpu_index;
5a38f081
AL
1885
1886 /* Clone all break/watchpoints.
1887 Note: Once we support ptrace with hw-debug register access, make sure
1888 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1889 QTAILQ_INIT(&env->breakpoints);
1890 QTAILQ_INIT(&env->watchpoints);
5a38f081 1891#if defined(TARGET_HAS_ICE)
72cf2d4f 1892 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1893 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1894 }
72cf2d4f 1895 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1896 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1897 wp->flags, NULL);
1898 }
1899#endif
1900
c5be9f08
TS
1901 return new_env;
1902}
1903
0124311e
FB
1904#if !defined(CONFIG_USER_ONLY)
1905
5c751e99
EI
1906static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1907{
1908 unsigned int i;
1909
1910 /* Discard jump cache entries for any tb which might potentially
1911 overlap the flushed page. */
1912 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1913 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1914 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1915
1916 i = tb_jmp_cache_hash_page(addr);
1917 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1918 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1919}
1920
08738984
IK
1921static CPUTLBEntry s_cputlb_empty_entry = {
1922 .addr_read = -1,
1923 .addr_write = -1,
1924 .addr_code = -1,
1925 .addend = -1,
1926};
1927
ee8b7021
FB
1928/* NOTE: if flush_global is true, also flush global entries (not
1929 implemented yet) */
1930void tlb_flush(CPUState *env, int flush_global)
33417e70 1931{
33417e70 1932 int i;
0124311e 1933
9fa3e853
FB
1934#if defined(DEBUG_TLB)
1935 printf("tlb_flush:\n");
1936#endif
0124311e
FB
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
33417e70 1941 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1942 int mmu_idx;
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1944 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1945 }
33417e70 1946 }
9fa3e853 1947
8a40a180 1948 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1949
d4c430a8
PB
1950 env->tlb_flush_addr = -1;
1951 env->tlb_flush_mask = 0;
e3db7226 1952 tlb_flush_count++;
33417e70
FB
1953}
1954
274da6b2 1955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1956{
5fafdf24 1957 if (addr == (tlb_entry->addr_read &
84b7b8e7 1958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1959 addr == (tlb_entry->addr_write &
84b7b8e7 1960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1961 addr == (tlb_entry->addr_code &
84b7b8e7 1962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1963 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1964 }
61382a50
FB
1965}
1966
2e12669a 1967void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1968{
8a40a180 1969 int i;
cfde4bd9 1970 int mmu_idx;
0124311e 1971
9fa3e853 1972#if defined(DEBUG_TLB)
108c49b8 1973 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1974#endif
d4c430a8
PB
1975 /* Check if we need to flush due to large pages. */
1976 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977#if defined(DEBUG_TLB)
1978 printf("tlb_flush_page: forced full flush ("
1979 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980 env->tlb_flush_addr, env->tlb_flush_mask);
1981#endif
1982 tlb_flush(env, 1);
1983 return;
1984 }
0124311e
FB
1985 /* must reset current TB so that interrupts cannot modify the
1986 links while we are modifying them */
1987 env->current_tb = NULL;
61382a50
FB
1988
1989 addr &= TARGET_PAGE_MASK;
1990 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1991 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1993
5c751e99 1994 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1995}
1996
9fa3e853
FB
1997/* update the TLBs so that writes to code in the virtual page 'addr'
1998 can be detected */
c227f099 1999static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2000{
5fafdf24 2001 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2002 ram_addr + TARGET_PAGE_SIZE,
2003 CODE_DIRTY_FLAG);
9fa3e853
FB
2004}
2005
9fa3e853 2006/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2007 tested for self modifying code */
c227f099 2008static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2009 target_ulong vaddr)
9fa3e853 2010{
f7c11b53 2011 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2012}
2013
5fafdf24 2014static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2015 unsigned long start, unsigned long length)
2016{
2017 unsigned long addr;
84b7b8e7
FB
2018 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2019 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2020 if ((addr - start) < length) {
0f459d16 2021 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2022 }
2023 }
2024}
2025
5579c7f3 2026/* Note: start and end must be within the same ram block. */
c227f099 2027void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2028 int dirty_flags)
1ccde1cb
FB
2029{
2030 CPUState *env;
4f2ac237 2031 unsigned long length, start1;
f7c11b53 2032 int i;
1ccde1cb
FB
2033
2034 start &= TARGET_PAGE_MASK;
2035 end = TARGET_PAGE_ALIGN(end);
2036
2037 length = end - start;
2038 if (length == 0)
2039 return;
f7c11b53 2040 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2041
1ccde1cb
FB
2042 /* we modify the TLB cache so that the dirty bit will be set again
2043 when accessing the range */
b2e0a138 2044 start1 = (unsigned long)qemu_safe_ram_ptr(start);
5579c7f3
PB
2045 /* Check that we don't span multiple blocks - this breaks the
2046 address comparisons below. */
b2e0a138 2047 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2048 != (end - 1) - start) {
2049 abort();
2050 }
2051
6a00d601 2052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2053 int mmu_idx;
2054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 for(i = 0; i < CPU_TLB_SIZE; i++)
2056 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 start1, length);
2058 }
6a00d601 2059 }
1ccde1cb
FB
2060}
2061
74576198
AL
2062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
f6f3fbca 2064 int ret = 0;
74576198 2065 in_migration = enable;
f6f3fbca
MT
2066 ret = cpu_notify_migration_log(!!enable);
2067 return ret;
74576198
AL
2068}
2069
2070int cpu_physical_memory_get_dirty_tracking(void)
2071{
2072 return in_migration;
2073}
2074
c227f099
AL
2075int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2076 target_phys_addr_t end_addr)
2bec46dc 2077{
7b8f3b78 2078 int ret;
151f7749 2079
f6f3fbca 2080 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2081 return ret;
2bec46dc
AL
2082}
2083
e5896b12
AP
2084int cpu_physical_log_start(target_phys_addr_t start_addr,
2085 ram_addr_t size)
2086{
2087 CPUPhysMemoryClient *client;
2088 QLIST_FOREACH(client, &memory_client_list, list) {
2089 if (client->log_start) {
2090 int r = client->log_start(client, start_addr, size);
2091 if (r < 0) {
2092 return r;
2093 }
2094 }
2095 }
2096 return 0;
2097}
2098
2099int cpu_physical_log_stop(target_phys_addr_t start_addr,
2100 ram_addr_t size)
2101{
2102 CPUPhysMemoryClient *client;
2103 QLIST_FOREACH(client, &memory_client_list, list) {
2104 if (client->log_stop) {
2105 int r = client->log_stop(client, start_addr, size);
2106 if (r < 0) {
2107 return r;
2108 }
2109 }
2110 }
2111 return 0;
2112}
2113
3a7d929e
FB
2114static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2115{
c227f099 2116 ram_addr_t ram_addr;
5579c7f3 2117 void *p;
3a7d929e 2118
84b7b8e7 2119 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2120 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2121 + tlb_entry->addend);
e890261f 2122 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2123 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2124 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2125 }
2126 }
2127}
2128
2129/* update the TLB according to the current state of the dirty bits */
2130void cpu_tlb_update_dirty(CPUState *env)
2131{
2132 int i;
cfde4bd9
IY
2133 int mmu_idx;
2134 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2135 for(i = 0; i < CPU_TLB_SIZE; i++)
2136 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2137 }
3a7d929e
FB
2138}
2139
0f459d16 2140static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2141{
0f459d16
PB
2142 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2143 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2144}
2145
0f459d16
PB
2146/* update the TLB corresponding to virtual page vaddr
2147 so that it is no longer dirty */
2148static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2149{
1ccde1cb 2150 int i;
cfde4bd9 2151 int mmu_idx;
1ccde1cb 2152
0f459d16 2153 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2154 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2155 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2156 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2157}
2158
d4c430a8
PB
2159/* Our TLB does not support large pages, so remember the area covered by
2160 large pages and trigger a full TLB flush if these are invalidated. */
2161static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2162 target_ulong size)
2163{
2164 target_ulong mask = ~(size - 1);
2165
2166 if (env->tlb_flush_addr == (target_ulong)-1) {
2167 env->tlb_flush_addr = vaddr & mask;
2168 env->tlb_flush_mask = mask;
2169 return;
2170 }
2171 /* Extend the existing region to include the new page.
2172 This is a compromise between unnecessary flushes and the cost
2173 of maintaining a full variable size TLB. */
2174 mask &= env->tlb_flush_mask;
2175 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2176 mask <<= 1;
2177 }
2178 env->tlb_flush_addr &= mask;
2179 env->tlb_flush_mask = mask;
2180}
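/* Worked example (illustrative, assuming a 32-bit target_ulong and 4MB
 * large pages): the first call for vaddr 0x00400000 records
 * tlb_flush_addr = 0x00400000, tlb_flush_mask = 0xffc00000. A second
 * call for vaddr 0x01000000 widens the mask until both pages share one
 * region: tlb_flush_addr = 0x00000000, tlb_flush_mask = 0xfe000000, so
 * any tlb_flush_page() hit in the low 32MB now forces a full flush. */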
2181
2182/* Add a new TLB entry. At most one entry for a given virtual address
2183 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2184 supplied size is only used by tlb_flush_page. */
2185void tlb_set_page(CPUState *env, target_ulong vaddr,
2186 target_phys_addr_t paddr, int prot,
2187 int mmu_idx, target_ulong size)
9fa3e853 2188{
92e873b9 2189 PhysPageDesc *p;
4f2ac237 2190 unsigned long pd;
9fa3e853 2191 unsigned int index;
4f2ac237 2192 target_ulong address;
0f459d16 2193 target_ulong code_address;
355b1943 2194 unsigned long addend;
84b7b8e7 2195 CPUTLBEntry *te;
a1d1bb31 2196 CPUWatchpoint *wp;
c227f099 2197 target_phys_addr_t iotlb;
9fa3e853 2198
d4c430a8
PB
2199 assert(size >= TARGET_PAGE_SIZE);
2200 if (size != TARGET_PAGE_SIZE) {
2201 tlb_add_large_page(env, vaddr, size);
2202 }
92e873b9 2203 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2204 if (!p) {
2205 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2206 } else {
2207 pd = p->phys_offset;
9fa3e853
FB
2208 }
2209#if defined(DEBUG_TLB)
7fd3f494
SW
2210 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2211 " prot=%x idx=%d pd=0x%08lx\n",
2212 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2213#endif
2214
0f459d16
PB
2215 address = vaddr;
2216 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2217 /* IO memory case (romd handled later) */
2218 address |= TLB_MMIO;
2219 }
5579c7f3 2220 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2221 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2222 /* Normal RAM. */
2223 iotlb = pd & TARGET_PAGE_MASK;
2224 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2225 iotlb |= IO_MEM_NOTDIRTY;
2226 else
2227 iotlb |= IO_MEM_ROM;
2228 } else {
ccbb4d44 2229 /* IO handlers are currently passed a physical address.
0f459d16
PB
2230 It would be nice to pass an offset from the base address
2231 of that region. This would avoid having to special case RAM,
2232 and avoid full address decoding in every device.
2233 We can't use the high bits of pd for this because
2234 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2235 iotlb = (pd & ~TARGET_PAGE_MASK);
2236 if (p) {
8da3ff18
PB
2237 iotlb += p->region_offset;
2238 } else {
2239 iotlb += paddr;
2240 }
0f459d16
PB
2241 }
2242
2243 code_address = address;
2244 /* Make accesses to pages with watchpoints go via the
2245 watchpoint trap routines. */
72cf2d4f 2246 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2247 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2248 /* Avoid trapping reads of pages with a write breakpoint. */
2249 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2250 iotlb = io_mem_watch + paddr;
2251 address |= TLB_MMIO;
2252 break;
2253 }
6658ffb8 2254 }
0f459d16 2255 }
d79acba4 2256
0f459d16
PB
2257 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2258 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2259 te = &env->tlb_table[mmu_idx][index];
2260 te->addend = addend - vaddr;
2261 if (prot & PAGE_READ) {
2262 te->addr_read = address;
2263 } else {
2264 te->addr_read = -1;
2265 }
5c751e99 2266
0f459d16
PB
2267 if (prot & PAGE_EXEC) {
2268 te->addr_code = code_address;
2269 } else {
2270 te->addr_code = -1;
2271 }
2272 if (prot & PAGE_WRITE) {
2273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2274 (pd & IO_MEM_ROMD)) {
2275 /* Write access calls the I/O callback. */
2276 te->addr_write = address | TLB_MMIO;
2277 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2278 !cpu_physical_memory_is_dirty(pd)) {
2279 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2280 } else {
0f459d16 2281 te->addr_write = address;
9fa3e853 2282 }
0f459d16
PB
2283 } else {
2284 te->addr_write = -1;
9fa3e853 2285 }
9fa3e853
FB
2286}
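/* Descriptive note on the addr_write encodings set up above:
 *   plain address           -> fast-path RAM write through te->addend
 *   address | TLB_NOTDIRTY  -> clean RAM page; the first write goes via
 *                              the notdirty handlers to set dirty flags
 *   address | TLB_MMIO      -> ROM/ROMD or I/O; writes take the I/O path
 *   -1                      -> page is not writable
 */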
2287
0124311e
FB
2288#else
2289
ee8b7021 2290void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2291{
2292}
2293
2e12669a 2294void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2295{
2296}
2297
edf8e2af
MW
2298/*
2299 * Walks guest process memory "regions" one by one
2300 * and calls callback function 'fn' for each region.
2301 */
5cd2c5b6
RH
2302
2303struct walk_memory_regions_data
2304{
2305 walk_memory_regions_fn fn;
2306 void *priv;
2307 unsigned long start;
2308 int prot;
2309};
2310
2311static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2312 abi_ulong end, int new_prot)
5cd2c5b6
RH
2313{
2314 if (data->start != -1ul) {
2315 int rc = data->fn(data->priv, data->start, end, data->prot);
2316 if (rc != 0) {
2317 return rc;
2318 }
2319 }
2320
2321 data->start = (new_prot ? end : -1ul);
2322 data->prot = new_prot;
2323
2324 return 0;
2325}
2326
2327static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2328 abi_ulong base, int level, void **lp)
5cd2c5b6 2329{
b480d9b7 2330 abi_ulong pa;
5cd2c5b6
RH
2331 int i, rc;
2332
2333 if (*lp == NULL) {
2334 return walk_memory_regions_end(data, base, 0);
2335 }
2336
2337 if (level == 0) {
2338 PageDesc *pd = *lp;
7296abac 2339 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2340 int prot = pd[i].flags;
2341
2342 pa = base | (i << TARGET_PAGE_BITS);
2343 if (prot != data->prot) {
2344 rc = walk_memory_regions_end(data, pa, prot);
2345 if (rc != 0) {
2346 return rc;
9fa3e853 2347 }
9fa3e853 2348 }
5cd2c5b6
RH
2349 }
2350 } else {
2351 void **pp = *lp;
7296abac 2352 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2353 pa = base | ((abi_ulong)i <<
2354 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2355 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2356 if (rc != 0) {
2357 return rc;
2358 }
2359 }
2360 }
2361
2362 return 0;
2363}
2364
2365int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2366{
2367 struct walk_memory_regions_data data;
2368 unsigned long i;
2369
2370 data.fn = fn;
2371 data.priv = priv;
2372 data.start = -1ul;
2373 data.prot = 0;
2374
2375 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2376 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2377 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2378 if (rc != 0) {
2379 return rc;
9fa3e853 2380 }
33417e70 2381 }
5cd2c5b6
RH
2382
2383 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2384}
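/* dump_region() below is a minimal example of a walk_memory_regions_fn:
 * page_dump() passes it as the callback to print one line per region. */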
2385
b480d9b7
PB
2386static int dump_region(void *priv, abi_ulong start,
2387 abi_ulong end, unsigned long prot)
edf8e2af
MW
2388{
2389 FILE *f = (FILE *)priv;
2390
b480d9b7
PB
2391 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2392 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2393 start, end, end - start,
2394 ((prot & PAGE_READ) ? 'r' : '-'),
2395 ((prot & PAGE_WRITE) ? 'w' : '-'),
2396 ((prot & PAGE_EXEC) ? 'x' : '-'));
2397
2398 return (0);
2399}
2400
2401/* dump memory mappings */
2402void page_dump(FILE *f)
2403{
2404 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2405 "start", "end", "size", "prot");
2406 walk_memory_regions(f, dump_region);
33417e70
FB
2407}
2408
53a5960a 2409int page_get_flags(target_ulong address)
33417e70 2410{
9fa3e853
FB
2411 PageDesc *p;
2412
2413 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2414 if (!p)
9fa3e853
FB
2415 return 0;
2416 return p->flags;
2417}
2418
376a7909
RH
2419/* Modify the flags of a page and invalidate the code if necessary.
2420 The flag PAGE_WRITE_ORG is positioned automatically depending
2421 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2422void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2423{
376a7909
RH
2424 target_ulong addr, len;
2425
2426 /* This function should never be called with addresses outside the
2427 guest address space. If this assert fires, it probably indicates
2428 a missing call to h2g_valid. */
b480d9b7
PB
2429#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2430 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2431#endif
2432 assert(start < end);
9fa3e853
FB
2433
2434 start = start & TARGET_PAGE_MASK;
2435 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2436
2437 if (flags & PAGE_WRITE) {
9fa3e853 2438 flags |= PAGE_WRITE_ORG;
376a7909
RH
2439 }
2440
2441 for (addr = start, len = end - start;
2442 len != 0;
2443 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2444 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2445
2446 /* If the write protection bit is set, then we invalidate
2447 the code inside. */
5fafdf24 2448 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2449 (flags & PAGE_WRITE) &&
2450 p->first_tb) {
d720b93d 2451 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2452 }
2453 p->flags = flags;
2454 }
33417e70
FB
2455}
2456
3d97b40b
TS
2457int page_check_range(target_ulong start, target_ulong len, int flags)
2458{
2459 PageDesc *p;
2460 target_ulong end;
2461 target_ulong addr;
2462
376a7909
RH
2463 /* This function should never be called with addresses outside the
2464 guest address space. If this assert fires, it probably indicates
2465 a missing call to h2g_valid. */
338e9e6c
BS
2466#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2467 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2468#endif
2469
3e0650a9
RH
2470 if (len == 0) {
2471 return 0;
2472 }
376a7909
RH
2473 if (start + len - 1 < start) {
2474 /* We've wrapped around. */
55f280c9 2475 return -1;
376a7909 2476 }
55f280c9 2477
3d97b40b
TS
2478 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2479 start = start & TARGET_PAGE_MASK;
2480
376a7909
RH
2481 for (addr = start, len = end - start;
2482 len != 0;
2483 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2484 p = page_find(addr >> TARGET_PAGE_BITS);
2485 if( !p )
2486 return -1;
2487 if( !(p->flags & PAGE_VALID) )
2488 return -1;
2489
dae3270c 2490 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2491 return -1;
dae3270c
FB
2492 if (flags & PAGE_WRITE) {
2493 if (!(p->flags & PAGE_WRITE_ORG))
2494 return -1;
2495 /* unprotect the page if it was put read-only because it
2496 contains translated code */
2497 if (!(p->flags & PAGE_WRITE)) {
2498 if (!page_unprotect(addr, 0, NULL))
2499 return -1;
2500 }
2501 return 0;
2502 }
3d97b40b
TS
2503 }
2504 return 0;
2505}
2506
9fa3e853 2507/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2508 page. Return TRUE if the fault was successfully handled. */
53a5960a 2509int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2510{
45d679d6
AJ
2511 unsigned int prot;
2512 PageDesc *p;
53a5960a 2513 target_ulong host_start, host_end, addr;
9fa3e853 2514
c8a706fe
PB
2515 /* Technically this isn't safe inside a signal handler. However we
2516 know this only ever happens in a synchronous SEGV handler, so in
2517 practice it seems to be ok. */
2518 mmap_lock();
2519
45d679d6
AJ
2520 p = page_find(address >> TARGET_PAGE_BITS);
2521 if (!p) {
c8a706fe 2522 mmap_unlock();
9fa3e853 2523 return 0;
c8a706fe 2524 }
45d679d6 2525
9fa3e853
FB
2526 /* if the page was really writable, then we change its
2527 protection back to writable */
45d679d6
AJ
2528 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2529 host_start = address & qemu_host_page_mask;
2530 host_end = host_start + qemu_host_page_size;
2531
2532 prot = 0;
2533 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2534 p = page_find(addr >> TARGET_PAGE_BITS);
2535 p->flags |= PAGE_WRITE;
2536 prot |= p->flags;
2537
9fa3e853
FB
2538 /* and since the content will be modified, we must invalidate
2539 the corresponding translated code. */
45d679d6 2540 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2541#ifdef DEBUG_TB_CHECK
45d679d6 2542 tb_invalidate_check(addr);
9fa3e853 2543#endif
9fa3e853 2544 }
45d679d6
AJ
2545 mprotect((void *)g2h(host_start), qemu_host_page_size,
2546 prot & PAGE_BITS);
2547
2548 mmap_unlock();
2549 return 1;
9fa3e853 2550 }
c8a706fe 2551 mmap_unlock();
9fa3e853
FB
2552 return 0;
2553}
2554
6a00d601
FB
2555static inline void tlb_set_dirty(CPUState *env,
2556 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2557{
2558}
9fa3e853
FB
2559#endif /* defined(CONFIG_USER_ONLY) */
2560
e2eef170 2561#if !defined(CONFIG_USER_ONLY)
8da3ff18 2562
c04b2b78
PB
2563#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2564typedef struct subpage_t {
2565 target_phys_addr_t base;
f6405247
RH
2566 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2567 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2568} subpage_t;
2569
c227f099
AL
2570static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2571 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2572static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2573 ram_addr_t orig_memory,
2574 ram_addr_t region_offset);
db7b5426
BS
2575#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2576 need_subpage) \
2577 do { \
2578 if (addr > start_addr) \
2579 start_addr2 = 0; \
2580 else { \
2581 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2582 if (start_addr2 > 0) \
2583 need_subpage = 1; \
2584 } \
2585 \
49e9fba2 2586 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2587 end_addr2 = TARGET_PAGE_SIZE - 1; \
2588 else { \
2589 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2590 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2591 need_subpage = 1; \
2592 } \
2593 } while (0)
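/* Illustrative expansion (assuming 4KB target pages): registering
 * start_addr = 0x10000100 with orig_size = 0x200 gives, on the first
 * iteration (addr == start_addr), start_addr2 = 0x100, end_addr2 = 0x2ff
 * and need_subpage = 1, i.e. only bytes 0x100..0x2ff of that page are
 * routed to the new handler through a subpage. */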
2594
8f2498f9
MT
2595/* register physical memory.
2596 For RAM, 'size' must be a multiple of the target page size.
2597 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2598 io memory page. The address used when calling the IO function is
2599 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2600 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2601 before calculating this offset. This should not be a problem unless
2602 the low bits of start_addr and region_offset differ. */
c227f099
AL
2603void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2604 ram_addr_t size,
2605 ram_addr_t phys_offset,
2606 ram_addr_t region_offset)
33417e70 2607{
c227f099 2608 target_phys_addr_t addr, end_addr;
92e873b9 2609 PhysPageDesc *p;
9d42037b 2610 CPUState *env;
c227f099 2611 ram_addr_t orig_size = size;
f6405247 2612 subpage_t *subpage;
33417e70 2613
3b8e6a2d 2614 assert(size);
f6f3fbca
MT
2615 cpu_notify_set_memory(start_addr, size, phys_offset);
2616
67c4d23c
PB
2617 if (phys_offset == IO_MEM_UNASSIGNED) {
2618 region_offset = start_addr;
2619 }
8da3ff18 2620 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2621 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2622 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2623
2624 addr = start_addr;
2625 do {
db7b5426
BS
2626 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2627 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2628 ram_addr_t orig_memory = p->phys_offset;
2629 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2630 int need_subpage = 0;
2631
2632 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2633 need_subpage);
f6405247 2634 if (need_subpage) {
db7b5426
BS
2635 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2636 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2637 &p->phys_offset, orig_memory,
2638 p->region_offset);
db7b5426
BS
2639 } else {
2640 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2641 >> IO_MEM_SHIFT];
2642 }
8da3ff18
PB
2643 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2644 region_offset);
2645 p->region_offset = 0;
db7b5426
BS
2646 } else {
2647 p->phys_offset = phys_offset;
2648 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2649 (phys_offset & IO_MEM_ROMD))
2650 phys_offset += TARGET_PAGE_SIZE;
2651 }
2652 } else {
2653 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2654 p->phys_offset = phys_offset;
8da3ff18 2655 p->region_offset = region_offset;
db7b5426 2656 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2657 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2658 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2659 } else {
c227f099 2660 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2661 int need_subpage = 0;
2662
2663 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2664 end_addr2, need_subpage);
2665
f6405247 2666 if (need_subpage) {
db7b5426 2667 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2668 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2669 addr & TARGET_PAGE_MASK);
db7b5426 2670 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2671 phys_offset, region_offset);
2672 p->region_offset = 0;
db7b5426
BS
2673 }
2674 }
2675 }
8da3ff18 2676 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2677 addr += TARGET_PAGE_SIZE;
2678 } while (addr != end_addr);
3b46e624 2679
9d42037b
FB
2680 /* since each CPU stores ram addresses in its TLB cache, we must
2681 reset the modified entries */
2682 /* XXX: slow ! */
2683 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2684 tlb_flush(env, 1);
2685 }
33417e70
FB
2686}
2687
ba863458 2688/* XXX: temporary until new memory mapping API */
c227f099 2689ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2690{
2691 PhysPageDesc *p;
2692
2693 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2694 if (!p)
2695 return IO_MEM_UNASSIGNED;
2696 return p->phys_offset;
2697}
2698
c227f099 2699void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2700{
2701 if (kvm_enabled())
2702 kvm_coalesce_mmio_region(addr, size);
2703}
2704
c227f099 2705void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2706{
2707 if (kvm_enabled())
2708 kvm_uncoalesce_mmio_region(addr, size);
2709}
2710
62a2744c
SY
2711void qemu_flush_coalesced_mmio_buffer(void)
2712{
2713 if (kvm_enabled())
2714 kvm_flush_coalesced_mmio_buffer();
2715}
2716
c902760f
MT
2717#if defined(__linux__) && !defined(TARGET_S390X)
2718
2719#include <sys/vfs.h>
2720
2721#define HUGETLBFS_MAGIC 0x958458f6
2722
2723static long gethugepagesize(const char *path)
2724{
2725 struct statfs fs;
2726 int ret;
2727
2728 do {
9742bf26 2729 ret = statfs(path, &fs);
c902760f
MT
2730 } while (ret != 0 && errno == EINTR);
2731
2732 if (ret != 0) {
9742bf26
YT
2733 perror(path);
2734 return 0;
c902760f
MT
2735 }
2736
2737 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2738 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2739
2740 return fs.f_bsize;
2741}
2742
04b16653
AW
2743static void *file_ram_alloc(RAMBlock *block,
2744 ram_addr_t memory,
2745 const char *path)
c902760f
MT
2746{
2747 char *filename;
2748 void *area;
2749 int fd;
2750#ifdef MAP_POPULATE
2751 int flags;
2752#endif
2753 unsigned long hpagesize;
2754
2755 hpagesize = gethugepagesize(path);
2756 if (!hpagesize) {
9742bf26 2757 return NULL;
c902760f
MT
2758 }
2759
2760 if (memory < hpagesize) {
2761 return NULL;
2762 }
2763
2764 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2765 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2766 return NULL;
2767 }
2768
2769 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2770 return NULL;
c902760f
MT
2771 }
2772
2773 fd = mkstemp(filename);
2774 if (fd < 0) {
9742bf26
YT
2775 perror("unable to create backing store for hugepages");
2776 free(filename);
2777 return NULL;
c902760f
MT
2778 }
2779 unlink(filename);
2780 free(filename);
2781
2782 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2783
2784 /*
2785 * ftruncate is not supported by hugetlbfs in older
2786 * hosts, so don't bother bailing out on errors.
2787 * If anything goes wrong with it under other filesystems,
2788 * mmap will fail.
2789 */
2790 if (ftruncate(fd, memory))
9742bf26 2791 perror("ftruncate");
c902760f
MT
2792
2793#ifdef MAP_POPULATE
2794 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2795 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2796 * to sidestep this quirk.
2797 */
2798 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2799 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2800#else
2801 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2802#endif
2803 if (area == MAP_FAILED) {
9742bf26
YT
2804 perror("file_ram_alloc: can't mmap RAM pages");
2805 close(fd);
2806 return (NULL);
c902760f 2807 }
04b16653 2808 block->fd = fd;
c902760f
MT
2809 return area;
2810}
2811#endif
2812
d17b5288 2813static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2814{
2815 RAMBlock *block, *next_block;
09d7ae90 2816 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2817
2818 if (QLIST_EMPTY(&ram_list.blocks))
2819 return 0;
2820
2821 QLIST_FOREACH(block, &ram_list.blocks, next) {
2822 ram_addr_t end, next = ULONG_MAX;
2823
2824 end = block->offset + block->length;
2825
2826 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2827 if (next_block->offset >= end) {
2828 next = MIN(next, next_block->offset);
2829 }
2830 }
2831 if (next - end >= size && next - end < mingap) {
2832 offset = end;
2833 mingap = next - end;
2834 }
2835 }
2836 return offset;
2837}
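/* Illustrative example: with existing blocks covering [0, 1MB) and
 * [3MB, 4MB), a request for 1MB considers the 2MB gap at offset 1MB and
 * the unbounded gap after 4MB, and returns 1MB -- the smallest gap that
 * still fits the requested size. */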
2838
2839static ram_addr_t last_ram_offset(void)
d17b5288
AW
2840{
2841 RAMBlock *block;
2842 ram_addr_t last = 0;
2843
2844 QLIST_FOREACH(block, &ram_list.blocks, next)
2845 last = MAX(last, block->offset + block->length);
2846
2847 return last;
2848}
2849
84b89d78 2850ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2851 ram_addr_t size, void *host)
84b89d78
CM
2852{
2853 RAMBlock *new_block, *block;
2854
2855 size = TARGET_PAGE_ALIGN(size);
2856 new_block = qemu_mallocz(sizeof(*new_block));
2857
2858 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2859 char *id = dev->parent_bus->info->get_dev_path(dev);
2860 if (id) {
2861 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2862 qemu_free(id);
2863 }
2864 }
2865 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2866
2867 QLIST_FOREACH(block, &ram_list.blocks, next) {
2868 if (!strcmp(block->idstr, new_block->idstr)) {
2869 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2870 new_block->idstr);
2871 abort();
2872 }
2873 }
2874
6977dfe6
YT
2875 if (host) {
2876 new_block->host = host;
cd19cfa2 2877 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2878 } else {
2879 if (mem_path) {
c902760f 2880#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2881 new_block->host = file_ram_alloc(new_block, size, mem_path);
2882 if (!new_block->host) {
2883 new_block->host = qemu_vmalloc(size);
e78815a5 2884 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2885 }
c902760f 2886#else
6977dfe6
YT
2887 fprintf(stderr, "-mem-path option unsupported\n");
2888 exit(1);
c902760f 2889#endif
6977dfe6 2890 } else {
6b02494d 2891#if defined(TARGET_S390X) && defined(CONFIG_KVM)
6977dfe6
YT
2892 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2893 new_block->host = mmap((void*)0x1000000, size,
2894 PROT_EXEC|PROT_READ|PROT_WRITE,
2895 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2896#else
6977dfe6 2897 new_block->host = qemu_vmalloc(size);
6b02494d 2898#endif
e78815a5 2899 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2900 }
c902760f 2901 }
6977dfe6 2902
d17b5288 2903 new_block->offset = find_ram_offset(size);
94a6b54f
PB
2904 new_block->length = size;
2905
f471a17e 2906 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2907
f471a17e 2908 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2909 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2910 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2911 0xff, size >> TARGET_PAGE_BITS);
2912
6f0437e8
JK
2913 if (kvm_enabled())
2914 kvm_setup_guest_memory(new_block->host, size);
2915
94a6b54f
PB
2916 return new_block->offset;
2917}
e9a1ab19 2918
6977dfe6
YT
2919ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2920{
2921 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2922}
2923
c227f099 2924void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2925{
04b16653
AW
2926 RAMBlock *block;
2927
2928 QLIST_FOREACH(block, &ram_list.blocks, next) {
2929 if (addr == block->offset) {
2930 QLIST_REMOVE(block, next);
cd19cfa2
HY
2931 if (block->flags & RAM_PREALLOC_MASK) {
2932 ;
2933 } else if (mem_path) {
04b16653
AW
2934#if defined (__linux__) && !defined(TARGET_S390X)
2935 if (block->fd) {
2936 munmap(block->host, block->length);
2937 close(block->fd);
2938 } else {
2939 qemu_vfree(block->host);
2940 }
fd28aa13
JK
2941#else
2942 abort();
04b16653
AW
2943#endif
2944 } else {
2945#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2946 munmap(block->host, block->length);
2947#else
2948 qemu_vfree(block->host);
2949#endif
2950 }
2951 qemu_free(block);
2952 return;
2953 }
2954 }
2955
e9a1ab19
FB
2956}
2957
cd19cfa2
HY
2958#ifndef _WIN32
2959void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2960{
2961 RAMBlock *block;
2962 ram_addr_t offset;
2963 int flags;
2964 void *area, *vaddr;
2965
2966 QLIST_FOREACH(block, &ram_list.blocks, next) {
2967 offset = addr - block->offset;
2968 if (offset < block->length) {
2969 vaddr = block->host + offset;
2970 if (block->flags & RAM_PREALLOC_MASK) {
2971 ;
2972 } else {
2973 flags = MAP_FIXED;
2974 munmap(vaddr, length);
2975 if (mem_path) {
2976#if defined(__linux__) && !defined(TARGET_S390X)
2977 if (block->fd) {
2978#ifdef MAP_POPULATE
2979 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2980 MAP_PRIVATE;
2981#else
2982 flags |= MAP_PRIVATE;
2983#endif
2984 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2985 flags, block->fd, offset);
2986 } else {
2987 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2988 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2989 flags, -1, 0);
2990 }
fd28aa13
JK
2991#else
2992 abort();
cd19cfa2
HY
2993#endif
2994 } else {
2995#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2996 flags |= MAP_SHARED | MAP_ANONYMOUS;
2997 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2998 flags, -1, 0);
2999#else
3000 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3001 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3002 flags, -1, 0);
3003#endif
3004 }
3005 if (area != vaddr) {
3006 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3007 length, addr);
3008 exit(1);
3009 }
3010 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3011 }
3012 return;
3013 }
3014 }
3015}
3016#endif /* !_WIN32 */
3017
dc828ca1 3018/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3019 With the exception of the softmmu code in this file, this should
3020 only be used for local memory (e.g. video ram) that the device owns,
3021 and knows it isn't going to access beyond the end of the block.
3022
3023 It should not be used for general purpose DMA.
3024 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3025 */
c227f099 3026void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3027{
94a6b54f
PB
3028 RAMBlock *block;
3029
f471a17e
AW
3030 QLIST_FOREACH(block, &ram_list.blocks, next) {
3031 if (addr - block->offset < block->length) {
7d82af38
VP
3032 /* Move this entry to the start of the list. */
3033 if (block != QLIST_FIRST(&ram_list.blocks)) {
3034 QLIST_REMOVE(block, next);
3035 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3036 }
f471a17e
AW
3037 return block->host + (addr - block->offset);
3038 }
94a6b54f 3039 }
f471a17e
AW
3040
3041 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3042 abort();
3043
3044 return NULL;
dc828ca1
PB
3045}
3046
b2e0a138
MT
3047/* Return a host pointer to ram allocated with qemu_ram_alloc.
3048 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3049 */
3050void *qemu_safe_ram_ptr(ram_addr_t addr)
3051{
3052 RAMBlock *block;
3053
3054 QLIST_FOREACH(block, &ram_list.blocks, next) {
3055 if (addr - block->offset < block->length) {
3056 return block->host + (addr - block->offset);
3057 }
3058 }
3059
3060 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3061 abort();
3062
3063 return NULL;
3064}
3065
e890261f 3066int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3067{
94a6b54f
PB
3068 RAMBlock *block;
3069 uint8_t *host = ptr;
3070
f471a17e
AW
3071 QLIST_FOREACH(block, &ram_list.blocks, next) {
3072 if (host - block->host < block->length) {
e890261f
MT
3073 *ram_addr = block->offset + (host - block->host);
3074 return 0;
f471a17e 3075 }
94a6b54f 3076 }
e890261f
MT
3077 return -1;
3078}
f471a17e 3079
e890261f
MT
3080/* Some of the softmmu routines need to translate from a host pointer
3081 (typically a TLB entry) back to a ram offset. */
3082ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3083{
3084 ram_addr_t ram_addr;
f471a17e 3085
e890261f
MT
3086 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3087 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3088 abort();
3089 }
3090 return ram_addr;
5579c7f3
PB
3091}
3092
c227f099 3093static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3094{
67d3b957 3095#ifdef DEBUG_UNASSIGNED
ab3d1727 3096 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3097#endif
faed1c2a 3098#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3099 do_unassigned_access(addr, 0, 0, 0, 1);
3100#endif
3101 return 0;
3102}
3103
c227f099 3104static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3105{
3106#ifdef DEBUG_UNASSIGNED
3107 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3108#endif
faed1c2a 3109#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3110 do_unassigned_access(addr, 0, 0, 0, 2);
3111#endif
3112 return 0;
3113}
3114
c227f099 3115static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3116{
3117#ifdef DEBUG_UNASSIGNED
3118 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3119#endif
faed1c2a 3120#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3121 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 3122#endif
33417e70
FB
3123 return 0;
3124}
3125
c227f099 3126static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3127{
67d3b957 3128#ifdef DEBUG_UNASSIGNED
ab3d1727 3129 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3130#endif
faed1c2a 3131#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3132 do_unassigned_access(addr, 1, 0, 0, 1);
3133#endif
3134}
3135
c227f099 3136static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3137{
3138#ifdef DEBUG_UNASSIGNED
3139 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3140#endif
faed1c2a 3141#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3142 do_unassigned_access(addr, 1, 0, 0, 2);
3143#endif
3144}
3145
c227f099 3146static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3147{
3148#ifdef DEBUG_UNASSIGNED
3149 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3150#endif
faed1c2a 3151#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3152 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 3153#endif
33417e70
FB
3154}
3155
d60efc6b 3156static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3157 unassigned_mem_readb,
e18231a3
BS
3158 unassigned_mem_readw,
3159 unassigned_mem_readl,
33417e70
FB
3160};
3161
d60efc6b 3162static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3163 unassigned_mem_writeb,
e18231a3
BS
3164 unassigned_mem_writew,
3165 unassigned_mem_writel,
33417e70
FB
3166};
3167
c227f099 3168static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3169 uint32_t val)
9fa3e853 3170{
3a7d929e 3171 int dirty_flags;
f7c11b53 3172 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3173 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3174#if !defined(CONFIG_USER_ONLY)
3a7d929e 3175 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3176 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3177#endif
3a7d929e 3178 }
5579c7f3 3179 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3180 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3181 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3182 /* we remove the notdirty callback only if the code has been
3183 flushed */
3184 if (dirty_flags == 0xff)
2e70f6ef 3185 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3186}
3187
c227f099 3188static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3189 uint32_t val)
9fa3e853 3190{
3a7d929e 3191 int dirty_flags;
f7c11b53 3192 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3193 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3194#if !defined(CONFIG_USER_ONLY)
3a7d929e 3195 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3196 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3197#endif
3a7d929e 3198 }
5579c7f3 3199 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3200 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3201 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3202 /* we remove the notdirty callback only if the code has been
3203 flushed */
3204 if (dirty_flags == 0xff)
2e70f6ef 3205 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3206}
3207
c227f099 3208static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3209 uint32_t val)
9fa3e853 3210{
3a7d929e 3211 int dirty_flags;
f7c11b53 3212 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3213 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3214#if !defined(CONFIG_USER_ONLY)
3a7d929e 3215 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3216 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3217#endif
3a7d929e 3218 }
5579c7f3 3219 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3220 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3221 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3222 /* we remove the notdirty callback only if the code has been
3223 flushed */
3224 if (dirty_flags == 0xff)
2e70f6ef 3225 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3226}
3227
d60efc6b 3228static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3229 NULL, /* never used */
3230 NULL, /* never used */
3231 NULL, /* never used */
3232};
3233
d60efc6b 3234static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3235 notdirty_mem_writeb,
3236 notdirty_mem_writew,
3237 notdirty_mem_writel,
3238};
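/* Descriptive note: these handlers back the IO_MEM_NOTDIRTY slot that
 * tlb_set_page() installs for clean RAM pages. A write lands here,
 * invalidates any translated code on the page, performs the store,
 * updates the dirty flags, and once the page is fully dirty (0xff)
 * re-enables the fast write path via tlb_set_dirty(). */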
3239
0f459d16 3240/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3241static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3242{
3243 CPUState *env = cpu_single_env;
06d55cc1
AL
3244 target_ulong pc, cs_base;
3245 TranslationBlock *tb;
0f459d16 3246 target_ulong vaddr;
a1d1bb31 3247 CPUWatchpoint *wp;
06d55cc1 3248 int cpu_flags;
0f459d16 3249
06d55cc1
AL
3250 if (env->watchpoint_hit) {
3251 /* We re-entered the check after replacing the TB. Now raise
3252 * the debug interrupt so that it will trigger after the
3253 * current instruction. */
3254 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3255 return;
3256 }
2e70f6ef 3257 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3258 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3259 if ((vaddr == (wp->vaddr & len_mask) ||
3260 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3261 wp->flags |= BP_WATCHPOINT_HIT;
3262 if (!env->watchpoint_hit) {
3263 env->watchpoint_hit = wp;
3264 tb = tb_find_pc(env->mem_io_pc);
3265 if (!tb) {
3266 cpu_abort(env, "check_watchpoint: could not find TB for "
3267 "pc=%p", (void *)env->mem_io_pc);
3268 }
3269 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3270 tb_phys_invalidate(tb, -1);
3271 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3272 env->exception_index = EXCP_DEBUG;
3273 } else {
3274 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3275 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3276 }
3277 cpu_resume_from_signal(env, NULL);
06d55cc1 3278 }
6e140f28
AL
3279 } else {
3280 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3281 }
3282 }
3283}
3284
6658ffb8
PB
3285/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3286 so these check for a hit then pass through to the normal out-of-line
3287 phys routines. */
c227f099 3288static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3289{
b4051334 3290 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3291 return ldub_phys(addr);
3292}
3293
c227f099 3294static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3295{
b4051334 3296 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3297 return lduw_phys(addr);
3298}
3299
c227f099 3300static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3301{
b4051334 3302 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3303 return ldl_phys(addr);
3304}
3305
c227f099 3306static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3307 uint32_t val)
3308{
b4051334 3309 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3310 stb_phys(addr, val);
3311}
3312
c227f099 3313static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3314 uint32_t val)
3315{
b4051334 3316 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3317 stw_phys(addr, val);
3318}
3319
c227f099 3320static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3321 uint32_t val)
3322{
b4051334 3323 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3324 stl_phys(addr, val);
3325}
3326
d60efc6b 3327static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3328 watch_mem_readb,
3329 watch_mem_readw,
3330 watch_mem_readl,
3331};
3332
d60efc6b 3333static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3334 watch_mem_writeb,
3335 watch_mem_writew,
3336 watch_mem_writel,
3337};
6658ffb8 3338
f6405247
RH
3339static inline uint32_t subpage_readlen (subpage_t *mmio,
3340 target_phys_addr_t addr,
3341 unsigned int len)
db7b5426 3342{
f6405247 3343 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3344#if defined(DEBUG_SUBPAGE)
3345 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3346 mmio, len, addr, idx);
3347#endif
db7b5426 3348
f6405247
RH
3349 addr += mmio->region_offset[idx];
3350 idx = mmio->sub_io_index[idx];
3351 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3352}
3353
c227f099 3354static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3355 uint32_t value, unsigned int len)
db7b5426 3356{
f6405247 3357 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3358#if defined(DEBUG_SUBPAGE)
f6405247
RH
3359 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3360 __func__, mmio, len, addr, idx, value);
db7b5426 3361#endif
f6405247
RH
3362
3363 addr += mmio->region_offset[idx];
3364 idx = mmio->sub_io_index[idx];
3365 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3366}
3367
c227f099 3368static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3369{
db7b5426
BS
3370 return subpage_readlen(opaque, addr, 0);
3371}
3372
c227f099 3373static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3374 uint32_t value)
3375{
db7b5426
BS
3376 subpage_writelen(opaque, addr, value, 0);
3377}
3378
c227f099 3379static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3380{
db7b5426
BS
3381 return subpage_readlen(opaque, addr, 1);
3382}
3383
c227f099 3384static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3385 uint32_t value)
3386{
db7b5426
BS
3387 subpage_writelen(opaque, addr, value, 1);
3388}
3389
c227f099 3390static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3391{
db7b5426
BS
3392 return subpage_readlen(opaque, addr, 2);
3393}
3394
f6405247
RH
3395static void subpage_writel (void *opaque, target_phys_addr_t addr,
3396 uint32_t value)
db7b5426 3397{
db7b5426
BS
3398 subpage_writelen(opaque, addr, value, 2);
3399}
3400
d60efc6b 3401static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3402 &subpage_readb,
3403 &subpage_readw,
3404 &subpage_readl,
3405};
3406
d60efc6b 3407static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3408 &subpage_writeb,
3409 &subpage_writew,
3410 &subpage_writel,
3411};
3412
c227f099
AL
3413static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3414 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3415{
3416 int idx, eidx;
3417
3418 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3419 return -1;
3420 idx = SUBPAGE_IDX(start);
3421 eidx = SUBPAGE_IDX(end);
3422#if defined(DEBUG_SUBPAGE)
0bf9e31a 3423 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3424 mmio, start, end, idx, eidx, memory);
3425#endif
95c318f5
GN
3426 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3427 memory = IO_MEM_UNASSIGNED;
f6405247 3428 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3429 for (; idx <= eidx; idx++) {
f6405247
RH
3430 mmio->sub_io_index[idx] = memory;
3431 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3432 }
3433
3434 return 0;
3435}
3436
f6405247
RH
3437static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3438 ram_addr_t orig_memory,
3439 ram_addr_t region_offset)
db7b5426 3440{
c227f099 3441 subpage_t *mmio;
db7b5426
BS
3442 int subpage_memory;
3443
c227f099 3444 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3445
3446 mmio->base = base;
2507c12a
AG
3447 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3448 DEVICE_NATIVE_ENDIAN);
db7b5426 3449#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3450 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3451 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3452#endif
1eec614b 3453 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3454 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3455
3456 return mmio;
3457}
3458
88715657
AL
3459static int get_free_io_mem_idx(void)
3460{
3461 int i;
3462
3463 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3464 if (!io_mem_used[i]) {
3465 io_mem_used[i] = 1;
3466 return i;
3467 }
c6703b47 3468 fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3469 return -1;
3470}
3471
dd310534
AG
3472/*
3473 * Usually, devices operate in little endian mode. There are devices out
3474 * there that operate in big endian too. Each device gets byte swapped
3475 * mmio if plugged onto a CPU that does the other endianness.
3476 *
3477 * CPU Device swap?
3478 *
3479 * little little no
3480 * little big yes
3481 * big little yes
3482 * big big no
3483 */
3484
3485typedef struct SwapEndianContainer {
3486 CPUReadMemoryFunc *read[3];
3487 CPUWriteMemoryFunc *write[3];
3488 void *opaque;
3489} SwapEndianContainer;
3490
3491static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3492{
3493 uint32_t val;
3494 SwapEndianContainer *c = opaque;
3495 val = c->read[0](c->opaque, addr);
3496 return val;
3497}
3498
3499static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3500{
3501 uint32_t val;
3502 SwapEndianContainer *c = opaque;
3503 val = bswap16(c->read[1](c->opaque, addr));
3504 return val;
3505}
3506
3507static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3508{
3509 uint32_t val;
3510 SwapEndianContainer *c = opaque;
3511 val = bswap32(c->read[2](c->opaque, addr));
3512 return val;
3513}
3514
3515static CPUReadMemoryFunc * const swapendian_readfn[3] = {
3516 swapendian_mem_readb,
3517 swapendian_mem_readw,
3518 swapendian_mem_readl
3519};
3520
3521static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3522 uint32_t val)
3523{
3524 SwapEndianContainer *c = opaque;
3525 c->write[0](c->opaque, addr, val);
3526}
3527
3528static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3529 uint32_t val)
3530{
3531 SwapEndianContainer *c = opaque;
3532 c->write[1](c->opaque, addr, bswap16(val));
3533}
3534
3535static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3536 uint32_t val)
3537{
3538 SwapEndianContainer *c = opaque;
3539 c->write[2](c->opaque, addr, bswap32(val));
3540}
3541
3542static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
3543 swapendian_mem_writeb,
3544 swapendian_mem_writew,
3545 swapendian_mem_writel
3546};
3547
3548static void swapendian_init(int io_index)
3549{
3550 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3551 int i;
3552
3553 /* Swap mmio for big endian targets */
3554 c->opaque = io_mem_opaque[io_index];
3555 for (i = 0; i < 3; i++) {
3556 c->read[i] = io_mem_read[io_index][i];
3557 c->write[i] = io_mem_write[io_index][i];
3558
3559 io_mem_read[io_index][i] = swapendian_readfn[i];
3560 io_mem_write[io_index][i] = swapendian_writefn[i];
3561 }
3562 io_mem_opaque[io_index] = c;
3563}
3564
3565static void swapendian_del(int io_index)
3566{
3567 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3568 qemu_free(io_mem_opaque[io_index]);
3569 }
3570}
3571
33417e70
FB
3572/* mem_read and mem_write are arrays of functions containing the
3573 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3574 2). Functions can be omitted with a NULL function pointer.
3ee89922 3575   If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3576 modified. If it is zero, a new io zone is allocated. The return
3577   value can be used with cpu_register_physical_memory(). -1 is
3578   returned on error. */
1eed09cb 3579static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3580 CPUReadMemoryFunc * const *mem_read,
3581 CPUWriteMemoryFunc * const *mem_write,
dd310534 3582 void *opaque, enum device_endian endian)
33417e70 3583{
3cab721d
RH
3584 int i;
3585
33417e70 3586 if (io_index <= 0) {
88715657
AL
3587 io_index = get_free_io_mem_idx();
3588 if (io_index == -1)
3589 return io_index;
33417e70 3590 } else {
1eed09cb 3591 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3592 if (io_index >= IO_MEM_NB_ENTRIES)
3593 return -1;
3594 }
b5ff1b31 3595
3cab721d
RH
3596 for (i = 0; i < 3; ++i) {
3597 io_mem_read[io_index][i]
3598 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3599 }
3600 for (i = 0; i < 3; ++i) {
3601 io_mem_write[io_index][i]
3602 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3603 }
a4193c8a 3604 io_mem_opaque[io_index] = opaque;
f6405247 3605
dd310534
AG
3606 switch (endian) {
3607 case DEVICE_BIG_ENDIAN:
3608#ifndef TARGET_WORDS_BIGENDIAN
3609 swapendian_init(io_index);
3610#endif
3611 break;
3612 case DEVICE_LITTLE_ENDIAN:
3613#ifdef TARGET_WORDS_BIGENDIAN
3614 swapendian_init(io_index);
3615#endif
3616 break;
3617 case DEVICE_NATIVE_ENDIAN:
3618 default:
3619 break;
3620 }
3621
f6405247 3622 return (io_index << IO_MEM_SHIFT);
33417e70 3623}
61382a50 3624
d60efc6b
BS
3625int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3626 CPUWriteMemoryFunc * const *mem_write,
dd310534 3627 void *opaque, enum device_endian endian)
1eed09cb 3628{
2507c12a 3629 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3630}
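/*
 * Illustrative sketch, not part of exec.c: registering MMIO callbacks for a
 * hypothetical device with cpu_register_io_memory().  MyDevState and the
 * mydev_* handlers are invented for the example; only the registration API
 * is real.  Because the device is declared DEVICE_BIG_ENDIAN, the swapendian
 * wrappers above are interposed automatically when the target CPU is
 * little-endian.
 */
typedef struct MyDevState {
    uint32_t reg;
} MyDevState;

static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return s->reg;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->reg = val;
}

/* NULL slots fall back to unassigned_mem_read/write, as shown above */
static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL, NULL, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL, NULL, mydev_writel,
};

static int mydev_register(MyDevState *s)
{
    int io_index = cpu_register_io_memory(mydev_read, mydev_write, s,
                                          DEVICE_BIG_ENDIAN);
    /* io_index (already shifted by IO_MEM_SHIFT) is what would be handed to
       cpu_register_physical_memory(); -1 means the io table is full.
       cpu_unregister_io_memory(io_index) releases the slot again. */
    return io_index;
}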
3631
88715657
AL
3632void cpu_unregister_io_memory(int io_table_address)
3633{
3634 int i;
3635 int io_index = io_table_address >> IO_MEM_SHIFT;
3636
dd310534
AG
3637 swapendian_del(io_index);
3638
88715657
AL
3639    for (i = 0; i < 3; i++) {
3640 io_mem_read[io_index][i] = unassigned_mem_read[i];
3641 io_mem_write[io_index][i] = unassigned_mem_write[i];
3642 }
3643 io_mem_opaque[io_index] = NULL;
3644 io_mem_used[io_index] = 0;
3645}
3646
e9179ce1
AK
3647static void io_mem_init(void)
3648{
3649 int i;
3650
2507c12a
AG
3651 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3652 unassigned_mem_write, NULL,
3653 DEVICE_NATIVE_ENDIAN);
3654 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3655 unassigned_mem_write, NULL,
3656 DEVICE_NATIVE_ENDIAN);
3657 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3658 notdirty_mem_write, NULL,
3659 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3660    for (i = 0; i < 5; i++)
3661 io_mem_used[i] = 1;
3662
3663 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3664 watch_mem_write, NULL,
3665 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3666}
3667
e2eef170
PB
3668#endif /* !defined(CONFIG_USER_ONLY) */
3669
13eb76e0
FB
3670/* physical memory access (slow version, mainly for debug) */
3671#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3672int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3673 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3674{
3675 int l, flags;
3676 target_ulong page;
53a5960a 3677 void * p;
13eb76e0
FB
3678
3679 while (len > 0) {
3680 page = addr & TARGET_PAGE_MASK;
3681 l = (page + TARGET_PAGE_SIZE) - addr;
3682 if (l > len)
3683 l = len;
3684 flags = page_get_flags(page);
3685 if (!(flags & PAGE_VALID))
a68fe89c 3686 return -1;
13eb76e0
FB
3687 if (is_write) {
3688 if (!(flags & PAGE_WRITE))
a68fe89c 3689 return -1;
579a97f7 3690 /* XXX: this code should not depend on lock_user */
72fb7daa 3691 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3692 return -1;
72fb7daa
AJ
3693 memcpy(p, buf, l);
3694 unlock_user(p, addr, l);
13eb76e0
FB
3695 } else {
3696 if (!(flags & PAGE_READ))
a68fe89c 3697 return -1;
579a97f7 3698 /* XXX: this code should not depend on lock_user */
72fb7daa 3699 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3700 return -1;
72fb7daa 3701 memcpy(buf, p, l);
5b257578 3702 unlock_user(p, addr, 0);
13eb76e0
FB
3703 }
3704 len -= l;
3705 buf += l;
3706 addr += l;
3707 }
a68fe89c 3708 return 0;
13eb76e0 3709}
8df1cd07 3710
13eb76e0 3711#else
c227f099 3712void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3713 int len, int is_write)
3714{
3715 int l, io_index;
3716 uint8_t *ptr;
3717 uint32_t val;
c227f099 3718 target_phys_addr_t page;
2e12669a 3719 unsigned long pd;
92e873b9 3720 PhysPageDesc *p;
3b46e624 3721
13eb76e0
FB
3722 while (len > 0) {
3723 page = addr & TARGET_PAGE_MASK;
3724 l = (page + TARGET_PAGE_SIZE) - addr;
3725 if (l > len)
3726 l = len;
92e873b9 3727 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3728 if (!p) {
3729 pd = IO_MEM_UNASSIGNED;
3730 } else {
3731 pd = p->phys_offset;
3732 }
3b46e624 3733
13eb76e0 3734 if (is_write) {
3a7d929e 3735 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3736 target_phys_addr_t addr1 = addr;
13eb76e0 3737 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3738 if (p)
6c2934db 3739 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3740 /* XXX: could force cpu_single_env to NULL to avoid
3741 potential bugs */
6c2934db 3742 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3743 /* 32 bit write access */
c27004ec 3744 val = ldl_p(buf);
6c2934db 3745 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3746 l = 4;
6c2934db 3747 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3748 /* 16 bit write access */
c27004ec 3749 val = lduw_p(buf);
6c2934db 3750 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3751 l = 2;
3752 } else {
1c213d19 3753 /* 8 bit write access */
c27004ec 3754 val = ldub_p(buf);
6c2934db 3755 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3756 l = 1;
3757 }
3758 } else {
b448f2f3
FB
3759 unsigned long addr1;
3760 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3761 /* RAM case */
5579c7f3 3762 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3763 memcpy(ptr, buf, l);
3a7d929e
FB
3764 if (!cpu_physical_memory_is_dirty(addr1)) {
3765 /* invalidate code */
3766 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3767 /* set dirty bit */
f7c11b53
YT
3768 cpu_physical_memory_set_dirty_flags(
3769 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3770 }
13eb76e0
FB
3771 }
3772 } else {
5fafdf24 3773 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3774 !(pd & IO_MEM_ROMD)) {
c227f099 3775 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3776 /* I/O case */
3777 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3778 if (p)
6c2934db
AJ
3779 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3780 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3781 /* 32 bit read access */
6c2934db 3782 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3783 stl_p(buf, val);
13eb76e0 3784 l = 4;
6c2934db 3785 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3786 /* 16 bit read access */
6c2934db 3787 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3788 stw_p(buf, val);
13eb76e0
FB
3789 l = 2;
3790 } else {
1c213d19 3791 /* 8 bit read access */
6c2934db 3792 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3793 stb_p(buf, val);
13eb76e0
FB
3794 l = 1;
3795 }
3796 } else {
3797 /* RAM case */
5579c7f3 3798 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3799 (addr & ~TARGET_PAGE_MASK);
3800 memcpy(buf, ptr, l);
3801 }
3802 }
3803 len -= l;
3804 buf += l;
3805 addr += l;
3806 }
3807}
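/*
 * Illustrative sketch, not part of exec.c: copying a small buffer to and
 * from guest-physical memory with cpu_physical_memory_rw().  The guest
 * address is a hypothetical parameter; the RAM-vs-I/O dispatch shown above
 * is what actually services both calls.
 */
static void guest_phys_copy_sketch(target_phys_addr_t guest_buf_paddr)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    /* is_write = 1: write the buffer into guest memory (RAM or MMIO) */
    cpu_physical_memory_rw(guest_buf_paddr, out, sizeof(out), 1);
    /* is_write = 0: read it back */
    cpu_physical_memory_rw(guest_buf_paddr, in, sizeof(in), 0);
}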
8df1cd07 3808
d0ecd2aa 3809/* used for ROM loading : can write in RAM and ROM */
c227f099 3810void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3811 const uint8_t *buf, int len)
3812{
3813 int l;
3814 uint8_t *ptr;
c227f099 3815 target_phys_addr_t page;
d0ecd2aa
FB
3816 unsigned long pd;
3817 PhysPageDesc *p;
3b46e624 3818
d0ecd2aa
FB
3819 while (len > 0) {
3820 page = addr & TARGET_PAGE_MASK;
3821 l = (page + TARGET_PAGE_SIZE) - addr;
3822 if (l > len)
3823 l = len;
3824 p = phys_page_find(page >> TARGET_PAGE_BITS);
3825 if (!p) {
3826 pd = IO_MEM_UNASSIGNED;
3827 } else {
3828 pd = p->phys_offset;
3829 }
3b46e624 3830
d0ecd2aa 3831 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3832 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3833 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3834 /* do nothing */
3835 } else {
3836 unsigned long addr1;
3837 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3838 /* ROM/RAM case */
5579c7f3 3839 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3840 memcpy(ptr, buf, l);
3841 }
3842 len -= l;
3843 buf += l;
3844 addr += l;
3845 }
3846}
3847
6d16c2f8
AL
3848typedef struct {
3849 void *buffer;
c227f099
AL
3850 target_phys_addr_t addr;
3851 target_phys_addr_t len;
6d16c2f8
AL
3852} BounceBuffer;
3853
3854static BounceBuffer bounce;
3855
ba223c29
AL
3856typedef struct MapClient {
3857 void *opaque;
3858 void (*callback)(void *opaque);
72cf2d4f 3859 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3860} MapClient;
3861
72cf2d4f
BS
3862static QLIST_HEAD(map_client_list, MapClient) map_client_list
3863 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3864
3865void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3866{
3867 MapClient *client = qemu_malloc(sizeof(*client));
3868
3869 client->opaque = opaque;
3870 client->callback = callback;
72cf2d4f 3871 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3872 return client;
3873}
3874
3875void cpu_unregister_map_client(void *_client)
3876{
3877 MapClient *client = (MapClient *)_client;
3878
72cf2d4f 3879 QLIST_REMOVE(client, link);
34d5e948 3880 qemu_free(client);
ba223c29
AL
3881}
3882
3883static void cpu_notify_map_clients(void)
3884{
3885 MapClient *client;
3886
72cf2d4f
BS
3887 while (!QLIST_EMPTY(&map_client_list)) {
3888 client = QLIST_FIRST(&map_client_list);
ba223c29 3889 client->callback(client->opaque);
34d5e948 3890 cpu_unregister_map_client(client);
ba223c29
AL
3891 }
3892}
3893
6d16c2f8
AL
3894/* Map a physical memory region into a host virtual address.
3895 * May map a subset of the requested range, given by and returned in *plen.
3896 * May return NULL if resources needed to perform the mapping are exhausted.
3897 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3898 * Use cpu_register_map_client() to know when retrying the map operation is
3899 * likely to succeed.
6d16c2f8 3900 */
c227f099
AL
3901void *cpu_physical_memory_map(target_phys_addr_t addr,
3902 target_phys_addr_t *plen,
6d16c2f8
AL
3903 int is_write)
3904{
c227f099
AL
3905 target_phys_addr_t len = *plen;
3906 target_phys_addr_t done = 0;
6d16c2f8
AL
3907 int l;
3908 uint8_t *ret = NULL;
3909 uint8_t *ptr;
c227f099 3910 target_phys_addr_t page;
6d16c2f8
AL
3911 unsigned long pd;
3912 PhysPageDesc *p;
3913 unsigned long addr1;
3914
3915 while (len > 0) {
3916 page = addr & TARGET_PAGE_MASK;
3917 l = (page + TARGET_PAGE_SIZE) - addr;
3918 if (l > len)
3919 l = len;
3920 p = phys_page_find(page >> TARGET_PAGE_BITS);
3921 if (!p) {
3922 pd = IO_MEM_UNASSIGNED;
3923 } else {
3924 pd = p->phys_offset;
3925 }
3926
3927 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3928 if (done || bounce.buffer) {
3929 break;
3930 }
3931 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3932 bounce.addr = addr;
3933 bounce.len = l;
3934 if (!is_write) {
54f7b4a3 3935 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8
AL
3936 }
3937 ptr = bounce.buffer;
3938 } else {
3939 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3940 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3941 }
3942 if (!done) {
3943 ret = ptr;
3944 } else if (ret + done != ptr) {
3945 break;
3946 }
3947
3948 len -= l;
3949 addr += l;
3950 done += l;
3951 }
3952 *plen = done;
3953 return ret;
3954}
3955
3956/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3957 * Will also mark the memory as dirty if is_write == 1. access_len gives
3958 * the amount of memory that was actually read or written by the caller.
3959 */
c227f099
AL
3960void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3961 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3962{
3963 if (buffer != bounce.buffer) {
3964 if (is_write) {
e890261f 3965 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3966 while (access_len) {
3967 unsigned l;
3968 l = TARGET_PAGE_SIZE;
3969 if (l > access_len)
3970 l = access_len;
3971 if (!cpu_physical_memory_is_dirty(addr1)) {
3972 /* invalidate code */
3973 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3974 /* set dirty bit */
f7c11b53
YT
3975 cpu_physical_memory_set_dirty_flags(
3976 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3977 }
3978 addr1 += l;
3979 access_len -= l;
3980 }
3981 }
3982 return;
3983 }
3984 if (is_write) {
3985 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3986 }
f8a83245 3987 qemu_vfree(bounce.buffer);
6d16c2f8 3988 bounce.buffer = NULL;
ba223c29 3989 cpu_notify_map_clients();
6d16c2f8 3990}
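/*
 * Illustrative sketch, not part of exec.c: the calling pattern that
 * cpu_physical_memory_map()/unmap() and the map-client list above are
 * designed for.  do_dma_write_sketch() and dma_retry_cb() are hypothetical
 * names; only the map/unmap/register-client calls are real.
 */
static void dma_retry_cb(void *opaque);

static void do_dma_write_sketch(target_phys_addr_t addr,
                                target_phys_addr_t len, const uint8_t *data)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* mapping resources (the single bounce buffer) are exhausted:
           ask to be notified when a retry is likely to succeed */
        cpu_register_map_client(NULL, dma_retry_cb);
        return;
    }
    /* plen may be smaller than len; only that much is mapped */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}

static void dma_retry_cb(void *opaque)
{
    /* retry do_dma_write_sketch() from here */
}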
d0ecd2aa 3991
8df1cd07 3992/* warning: addr must be aligned */
c227f099 3993uint32_t ldl_phys(target_phys_addr_t addr)
8df1cd07
FB
3994{
3995 int io_index;
3996 uint8_t *ptr;
3997 uint32_t val;
3998 unsigned long pd;
3999 PhysPageDesc *p;
4000
4001 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4002 if (!p) {
4003 pd = IO_MEM_UNASSIGNED;
4004 } else {
4005 pd = p->phys_offset;
4006 }
3b46e624 4007
5fafdf24 4008 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4009 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4010 /* I/O case */
4011 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4012 if (p)
4013 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4014 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4015 } else {
4016 /* RAM case */
5579c7f3 4017 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
4018 (addr & ~TARGET_PAGE_MASK);
4019 val = ldl_p(ptr);
4020 }
4021 return val;
4022}
4023
84b7b8e7 4024/* warning: addr must be aligned */
c227f099 4025uint64_t ldq_phys(target_phys_addr_t addr)
84b7b8e7
FB
4026{
4027 int io_index;
4028 uint8_t *ptr;
4029 uint64_t val;
4030 unsigned long pd;
4031 PhysPageDesc *p;
4032
4033 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4034 if (!p) {
4035 pd = IO_MEM_UNASSIGNED;
4036 } else {
4037 pd = p->phys_offset;
4038 }
3b46e624 4039
2a4188a3
FB
4040 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4041 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4042 /* I/O case */
4043 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4044 if (p)
4045 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
4046#ifdef TARGET_WORDS_BIGENDIAN
4047 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4048 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4049#else
4050 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4051 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4052#endif
4053 } else {
4054 /* RAM case */
5579c7f3 4055 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
4056 (addr & ~TARGET_PAGE_MASK);
4057 val = ldq_p(ptr);
4058 }
4059 return val;
4060}
4061
aab33094 4062/* XXX: optimize */
c227f099 4063uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4064{
4065 uint8_t val;
4066 cpu_physical_memory_read(addr, &val, 1);
4067 return val;
4068}
4069
733f0b02 4070/* warning: addr must be aligned */
c227f099 4071uint32_t lduw_phys(target_phys_addr_t addr)
aab33094 4072{
733f0b02
MT
4073 int io_index;
4074 uint8_t *ptr;
4075 uint64_t val;
4076 unsigned long pd;
4077 PhysPageDesc *p;
4078
4079 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4080 if (!p) {
4081 pd = IO_MEM_UNASSIGNED;
4082 } else {
4083 pd = p->phys_offset;
4084 }
4085
4086 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4087 !(pd & IO_MEM_ROMD)) {
4088 /* I/O case */
4089 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4090 if (p)
4091 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4092 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4093 } else {
4094 /* RAM case */
4095 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4096 (addr & ~TARGET_PAGE_MASK);
4097 val = lduw_p(ptr);
4098 }
4099 return val;
aab33094
FB
4100}
4101
8df1cd07
FB
4102/* warning: addr must be aligned. The ram page is not marked as dirty
4103 and the code inside is not invalidated. It is useful if the dirty
4104 bits are used to track modified PTEs */
c227f099 4105void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4106{
4107 int io_index;
4108 uint8_t *ptr;
4109 unsigned long pd;
4110 PhysPageDesc *p;
4111
4112 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4113 if (!p) {
4114 pd = IO_MEM_UNASSIGNED;
4115 } else {
4116 pd = p->phys_offset;
4117 }
3b46e624 4118
3a7d929e 4119 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4120 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4121 if (p)
4122 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4123 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4124 } else {
74576198 4125 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4126 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4127 stl_p(ptr, val);
74576198
AL
4128
4129 if (unlikely(in_migration)) {
4130 if (!cpu_physical_memory_is_dirty(addr1)) {
4131 /* invalidate code */
4132 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4133 /* set dirty bit */
f7c11b53
YT
4134 cpu_physical_memory_set_dirty_flags(
4135 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4136 }
4137 }
8df1cd07
FB
4138 }
4139}
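/*
 * Illustrative sketch, not part of exec.c: the page-table-entry update the
 * comment above has in mind.  A target MMU helper reads a PTE with
 * ldl_phys(), sets an accessed bit, and writes it back with
 * stl_phys_notdirty() so the write neither dirties the page nor invalidates
 * translated code.  PTE_ACCESSED_EXAMPLE and pte_addr are invented here.
 */
#define PTE_ACCESSED_EXAMPLE 0x20

static void mmu_mark_pte_accessed_sketch(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED_EXAMPLE)) {
        pte |= PTE_ACCESSED_EXAMPLE;
        stl_phys_notdirty(pte_addr, pte);
    }
}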
4140
c227f099 4141void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4142{
4143 int io_index;
4144 uint8_t *ptr;
4145 unsigned long pd;
4146 PhysPageDesc *p;
4147
4148 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4149 if (!p) {
4150 pd = IO_MEM_UNASSIGNED;
4151 } else {
4152 pd = p->phys_offset;
4153 }
3b46e624 4154
bc98a7ef
JM
4155 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4156 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4157 if (p)
4158 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4159#ifdef TARGET_WORDS_BIGENDIAN
4160 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4161 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4162#else
4163 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4164 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4165#endif
4166 } else {
5579c7f3 4167 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4168 (addr & ~TARGET_PAGE_MASK);
4169 stq_p(ptr, val);
4170 }
4171}
4172
8df1cd07 4173/* warning: addr must be aligned */
c227f099 4174void stl_phys(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4175{
4176 int io_index;
4177 uint8_t *ptr;
4178 unsigned long pd;
4179 PhysPageDesc *p;
4180
4181 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4182 if (!p) {
4183 pd = IO_MEM_UNASSIGNED;
4184 } else {
4185 pd = p->phys_offset;
4186 }
3b46e624 4187
3a7d929e 4188 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4189 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4190 if (p)
4191 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4192 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4193 } else {
4194 unsigned long addr1;
4195 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4196 /* RAM case */
5579c7f3 4197 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4198 stl_p(ptr, val);
3a7d929e
FB
4199 if (!cpu_physical_memory_is_dirty(addr1)) {
4200 /* invalidate code */
4201 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4202 /* set dirty bit */
f7c11b53
YT
4203 cpu_physical_memory_set_dirty_flags(addr1,
4204 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4205 }
8df1cd07
FB
4206 }
4207}
4208
aab33094 4209/* XXX: optimize */
c227f099 4210void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4211{
4212 uint8_t v = val;
4213 cpu_physical_memory_write(addr, &v, 1);
4214}
4215
733f0b02 4216/* warning: addr must be aligned */
c227f099 4217void stw_phys(target_phys_addr_t addr, uint32_t val)
aab33094 4218{
733f0b02
MT
4219 int io_index;
4220 uint8_t *ptr;
4221 unsigned long pd;
4222 PhysPageDesc *p;
4223
4224 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4225 if (!p) {
4226 pd = IO_MEM_UNASSIGNED;
4227 } else {
4228 pd = p->phys_offset;
4229 }
4230
4231 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4232 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4233 if (p)
4234 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4235 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4236 } else {
4237 unsigned long addr1;
4238 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4239 /* RAM case */
4240 ptr = qemu_get_ram_ptr(addr1);
4241 stw_p(ptr, val);
4242 if (!cpu_physical_memory_is_dirty(addr1)) {
4243 /* invalidate code */
4244 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4245 /* set dirty bit */
4246 cpu_physical_memory_set_dirty_flags(addr1,
4247 (0xff & ~CODE_DIRTY_FLAG));
4248 }
4249 }
aab33094
FB
4250}
4251
4252/* XXX: optimize */
c227f099 4253void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4254{
4255 val = tswap64(val);
71d2b725 4256 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4257}
4258
5e2972fd 4259/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4260int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4261 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4262{
4263 int l;
c227f099 4264 target_phys_addr_t phys_addr;
9b3c35e0 4265 target_ulong page;
13eb76e0
FB
4266
4267 while (len > 0) {
4268 page = addr & TARGET_PAGE_MASK;
4269 phys_addr = cpu_get_phys_page_debug(env, page);
4270 /* if no physical page mapped, return an error */
4271 if (phys_addr == -1)
4272 return -1;
4273 l = (page + TARGET_PAGE_SIZE) - addr;
4274 if (l > len)
4275 l = len;
5e2972fd 4276 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4277 if (is_write)
4278 cpu_physical_memory_write_rom(phys_addr, buf, l);
4279 else
5e2972fd 4280 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4281 len -= l;
4282 buf += l;
4283 addr += l;
4284 }
4285 return 0;
4286}
a68fe89c 4287#endif
13eb76e0 4288
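/*
 * Illustrative sketch, not part of exec.c: how a debugger front end (for
 * example the gdb stub) might use cpu_memory_rw_debug() above to peek at
 * guest virtual memory.  The helper name and the 32-bit read are invented;
 * only cpu_memory_rw_debug() and ldl_p() are real.
 */
static int debug_peek_u32_sketch(CPUState *env, target_ulong vaddr,
                                 uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);          /* value in target byte order */
    return 0;
}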
2e70f6ef
PB
4289/* in deterministic execution mode, instructions doing device I/Os
4290 must be at the end of the TB */
4291void cpu_io_recompile(CPUState *env, void *retaddr)
4292{
4293 TranslationBlock *tb;
4294 uint32_t n, cflags;
4295 target_ulong pc, cs_base;
4296 uint64_t flags;
4297
4298 tb = tb_find_pc((unsigned long)retaddr);
4299 if (!tb) {
4300 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4301 retaddr);
4302 }
4303 n = env->icount_decr.u16.low + tb->icount;
4304 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4305 /* Calculate how many instructions had been executed before the fault
bf20dc07 4306 occurred. */
2e70f6ef
PB
4307 n = n - env->icount_decr.u16.low;
4308 /* Generate a new TB ending on the I/O insn. */
4309 n++;
4310 /* On MIPS and SH, delay slot instructions can only be restarted if
4311 they were already the first instruction in the TB. If this is not
bf20dc07 4312 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4313 branch. */
4314#if defined(TARGET_MIPS)
4315 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4316 env->active_tc.PC -= 4;
4317 env->icount_decr.u16.low++;
4318 env->hflags &= ~MIPS_HFLAG_BMASK;
4319 }
4320#elif defined(TARGET_SH4)
4321 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4322 && n > 1) {
4323 env->pc -= 2;
4324 env->icount_decr.u16.low++;
4325 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4326 }
4327#endif
4328 /* This should never happen. */
4329 if (n > CF_COUNT_MASK)
4330 cpu_abort(env, "TB too big during recompile");
4331
4332 cflags = n | CF_LAST_IO;
4333 pc = tb->pc;
4334 cs_base = tb->cs_base;
4335 flags = tb->flags;
4336 tb_phys_invalidate(tb, -1);
4337 /* FIXME: In theory this could raise an exception. In practice
4338 we have already translated the block once so it's probably ok. */
4339 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4340 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4341 the first in the TB) then we end up generating a whole new TB and
4342 repeating the fault, which is horribly inefficient.
4343 Better would be to execute just this insn uncached, or generate a
4344 second new TB. */
4345 cpu_resume_from_signal(env, NULL);
4346}
4347
b3755a91
PB
4348#if !defined(CONFIG_USER_ONLY)
4349
055403b2 4350void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4351{
4352 int i, target_code_size, max_target_code_size;
4353 int direct_jmp_count, direct_jmp2_count, cross_page;
4354 TranslationBlock *tb;
3b46e624 4355
e3db7226
FB
4356 target_code_size = 0;
4357 max_target_code_size = 0;
4358 cross_page = 0;
4359 direct_jmp_count = 0;
4360 direct_jmp2_count = 0;
4361 for(i = 0; i < nb_tbs; i++) {
4362 tb = &tbs[i];
4363 target_code_size += tb->size;
4364 if (tb->size > max_target_code_size)
4365 max_target_code_size = tb->size;
4366 if (tb->page_addr[1] != -1)
4367 cross_page++;
4368 if (tb->tb_next_offset[0] != 0xffff) {
4369 direct_jmp_count++;
4370 if (tb->tb_next_offset[1] != 0xffff) {
4371 direct_jmp2_count++;
4372 }
4373 }
4374 }
4375 /* XXX: avoid using doubles ? */
57fec1fe 4376 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4377 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4378 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4379 cpu_fprintf(f, "TB count %d/%d\n",
4380 nb_tbs, code_gen_max_blocks);
5fafdf24 4381 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4382 nb_tbs ? target_code_size / nb_tbs : 0,
4383 max_target_code_size);
055403b2 4384 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4385 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4386 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4387 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4388 cross_page,
e3db7226
FB
4389 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4390 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4391 direct_jmp_count,
e3db7226
FB
4392 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4393 direct_jmp2_count,
4394 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4395 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4396 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4397 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4398 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4399 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4400}
4401
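/*
 * Illustrative sketch, not part of exec.c: dumping the translation-buffer
 * statistics gathered by dump_exec_info() above to stderr.  fprintf matches
 * the fprintf_function signature; in QEMU this path is normally reached
 * through a monitor command rather than called directly.
 */
static void dump_exec_info_to_stderr_sketch(void)
{
    dump_exec_info(stderr, fprintf);
}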
61382a50
FB
4402#define MMUSUFFIX _cmmu
4403#define GETPC() NULL
4404#define env cpu_single_env
b769d8fe 4405#define SOFTMMU_CODE_ACCESS
61382a50
FB
4406
4407#define SHIFT 0
4408#include "softmmu_template.h"
4409
4410#define SHIFT 1
4411#include "softmmu_template.h"
4412
4413#define SHIFT 2
4414#include "softmmu_template.h"
4415
4416#define SHIFT 3
4417#include "softmmu_template.h"
4418
4419#undef env
4420
4421#endif