qemu.git / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181
FB
28#include "cpu.h"
29#include "exec-all.h"
b67d9a52 30#include "tcg.h"
b3c7724c 31#include "hw/hw.h"
cc9e98cb 32#include "hw/qdev.h"
74576198 33#include "osdep.h"
7ba1e619 34#include "kvm.h"
29e922b6 35#include "qemu-timer.h"
53a5960a
PB
36#if defined(CONFIG_USER_ONLY)
37#include <qemu.h>
fd052bf6 38#include <signal.h>
f01576f1
JL
39#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40#include <sys/param.h>
41#if __FreeBSD_version >= 700104
42#define HAVE_KINFO_GETVMMAP
43#define sigqueue sigqueue_freebsd /* avoid redefinition */
44#include <sys/time.h>
45#include <sys/proc.h>
46#include <machine/profile.h>
47#define _KERNEL
48#include <sys/user.h>
49#undef _KERNEL
50#undef sigqueue
51#include <libutil.h>
52#endif
53#endif
53a5960a 54#endif
54936004 55
fd6ce8f6 56//#define DEBUG_TB_INVALIDATE
66e85a21 57//#define DEBUG_FLUSH
9fa3e853 58//#define DEBUG_TLB
67d3b957 59//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
60
61/* make various TB consistency checks */
5fafdf24
TS
62//#define DEBUG_TB_CHECK
63//#define DEBUG_TLB_CHECK
fd6ce8f6 64
1196be37 65//#define DEBUG_IOPORT
db7b5426 66//#define DEBUG_SUBPAGE
1196be37 67
99773bd4
PB
68#if !defined(CONFIG_USER_ONLY)
69/* TB consistency checks only implemented for usermode emulation. */
70#undef DEBUG_TB_CHECK
71#endif
72
9fa3e853
FB
73#define SMC_BITMAP_USE_THRESHOLD 10
74
bdaf78e0 75static TranslationBlock *tbs;
24ab68ac 76static int code_gen_max_blocks;
9fa3e853 77TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 78static int nb_tbs;
eb51d102 79/* any access to the tbs or the page table must use this lock */
c227f099 80spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 81
141ac468
BS
82#if defined(__arm__) || defined(__sparc_v9__)
83/* The prologue must be reachable with a direct jump. ARM and Sparc64
84 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
 85 section close to the code segment. */
86#define code_gen_section \
87 __attribute__((__section__(".gen_code"))) \
88 __attribute__((aligned (32)))
f8e2af11
SW
89#elif defined(_WIN32)
90/* Maximum alignment for Win32 is 16. */
91#define code_gen_section \
92 __attribute__((aligned (16)))
d03d860b
BS
93#else
94#define code_gen_section \
95 __attribute__((aligned (32)))
96#endif
97
98uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
99static uint8_t *code_gen_buffer;
100static unsigned long code_gen_buffer_size;
26a5f13b 101/* threshold to flush the translated code buffer */
bdaf78e0 102static unsigned long code_gen_buffer_max_size;
24ab68ac 103static uint8_t *code_gen_ptr;
fd6ce8f6 104
e2eef170 105#if !defined(CONFIG_USER_ONLY)
9fa3e853 106int phys_ram_fd;
74576198 107static int in_migration;
94a6b54f 108
f471a17e 109RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
e2eef170 110#endif
9fa3e853 111
6a00d601
FB
112CPUState *first_cpu;
113/* current CPU in the current thread. It is only valid inside
114 cpu_exec() */
5fafdf24 115CPUState *cpu_single_env;
2e70f6ef 116/* 0 = Do not count executed instructions.
bf20dc07 117 1 = Precise instruction counting.
2e70f6ef
PB
118 2 = Adaptive rate instruction counting. */
119int use_icount = 0;
120/* Current instruction counter. While executing translated code this may
121 include some instructions that have not yet been executed. */
122int64_t qemu_icount;
6a00d601 123
54936004 124typedef struct PageDesc {
92e873b9 125 /* list of TBs intersecting this ram page */
fd6ce8f6 126 TranslationBlock *first_tb;
9fa3e853
FB
 127 /* in order to optimize self-modifying code handling, we count the
 128 writes to a given page and switch to a bitmap above a threshold */
129 unsigned int code_write_count;
130 uint8_t *code_bitmap;
131#if defined(CONFIG_USER_ONLY)
132 unsigned long flags;
133#endif
54936004
FB
134} PageDesc;
135
41c1b1c9 136/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
137 while in user mode we want it to be based on virtual addresses. */
138#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
139#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
141#else
5cd2c5b6 142# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 143#endif
bedb69ea 144#else
5cd2c5b6 145# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 146#endif
54936004 147
5cd2c5b6
RH
148/* Size of the L2 (and L3, etc) page tables. */
149#define L2_BITS 10
54936004
FB
150#define L2_SIZE (1 << L2_BITS)
151
5cd2c5b6
RH
152/* The bits remaining after N lower levels of page tables. */
153#define P_L1_BITS_REM \
154 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155#define V_L1_BITS_REM \
156 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157
158/* Size of the L1 page table. Avoid silly small sizes. */
159#if P_L1_BITS_REM < 4
160#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
161#else
162#define P_L1_BITS P_L1_BITS_REM
163#endif
164
165#if V_L1_BITS_REM < 4
166#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
167#else
168#define V_L1_BITS V_L1_BITS_REM
169#endif
170
171#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
172#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
173
174#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176
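/* Example (an illustrative sketch, assuming L1_MAP_ADDR_SPACE_BITS == 32
   and TARGET_PAGE_BITS == 12, with L2_BITS == 10 as defined above):
   V_L1_BITS_REM = (32 - 12) % 10 = 0, so V_L1_BITS = 10, V_L1_SIZE = 1024
   and V_L1_SHIFT = 32 - 12 - 10 = 10.  page_find_alloc() then splits a
   page index in two:

     index  = addr >> TARGET_PAGE_BITS;
     l1_idx = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);   // top 10 bits
     l2_idx = index & (L2_SIZE - 1);                     // low 10 bits

   l1_map[l1_idx] points to an L2 table of PageDesc entries and the
   descriptor for addr is element l2_idx of that table.  The physical map
   rooted at l1_phys_map is walked the same way using the P_L1_* macros,
   with extra intermediate levels when the address space is wider. */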
83fb7adf
FB
177unsigned long qemu_real_host_page_size;
178unsigned long qemu_host_page_bits;
179unsigned long qemu_host_page_size;
180unsigned long qemu_host_page_mask;
54936004 181
5cd2c5b6
RH
182/* This is a multi-level map on the virtual address space.
183 The bottom level has pointers to PageDesc. */
184static void *l1_map[V_L1_SIZE];
54936004 185
e2eef170 186#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
187typedef struct PhysPageDesc {
188 /* offset in host memory of the page + io_index in the low bits */
189 ram_addr_t phys_offset;
190 ram_addr_t region_offset;
191} PhysPageDesc;
192
5cd2c5b6
RH
193/* This is a multi-level map on the physical address space.
194 The bottom level has pointers to PhysPageDesc. */
195static void *l1_phys_map[P_L1_SIZE];
6d9a1304 196
e2eef170
PB
197static void io_mem_init(void);
198
33417e70 199/* io memory support */
33417e70
FB
200CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 202void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 203static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
204static int io_mem_watch;
205#endif
33417e70 206
34865134 207/* log support */
1e8b27ca
JR
208#ifdef WIN32
209static const char *logfilename = "qemu.log";
210#else
d9b630fd 211static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 212#endif
34865134
FB
213FILE *logfile;
214int loglevel;
e735b91c 215static int log_append = 0;
34865134 216
e3db7226 217/* statistics */
b3755a91 218#if !defined(CONFIG_USER_ONLY)
e3db7226 219static int tlb_flush_count;
b3755a91 220#endif
e3db7226
FB
221static int tb_flush_count;
222static int tb_phys_invalidate_count;
223
7cb69cae
FB
224#ifdef _WIN32
225static void map_exec(void *addr, long size)
226{
227 DWORD old_protect;
228 VirtualProtect(addr, size,
229 PAGE_EXECUTE_READWRITE, &old_protect);
230
231}
232#else
233static void map_exec(void *addr, long size)
234{
4369415f 235 unsigned long start, end, page_size;
7cb69cae 236
4369415f 237 page_size = getpagesize();
7cb69cae 238 start = (unsigned long)addr;
4369415f 239 start &= ~(page_size - 1);
7cb69cae
FB
240
241 end = (unsigned long)addr + size;
4369415f
FB
242 end += page_size - 1;
243 end &= ~(page_size - 1);
7cb69cae
FB
244
245 mprotect((void *)start, end - start,
246 PROT_READ | PROT_WRITE | PROT_EXEC);
247}
248#endif
249
b346ff46 250static void page_init(void)
54936004 251{
83fb7adf 252 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 253 TARGET_PAGE_SIZE */
c2b48b69
AL
254#ifdef _WIN32
255 {
256 SYSTEM_INFO system_info;
257
258 GetSystemInfo(&system_info);
259 qemu_real_host_page_size = system_info.dwPageSize;
260 }
261#else
262 qemu_real_host_page_size = getpagesize();
263#endif
83fb7adf
FB
264 if (qemu_host_page_size == 0)
265 qemu_host_page_size = qemu_real_host_page_size;
266 if (qemu_host_page_size < TARGET_PAGE_SIZE)
267 qemu_host_page_size = TARGET_PAGE_SIZE;
268 qemu_host_page_bits = 0;
269 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
270 qemu_host_page_bits++;
271 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 272
2e9a5713 273#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 274 {
f01576f1
JL
275#ifdef HAVE_KINFO_GETVMMAP
276 struct kinfo_vmentry *freep;
277 int i, cnt;
278
279 freep = kinfo_getvmmap(getpid(), &cnt);
280 if (freep) {
281 mmap_lock();
282 for (i = 0; i < cnt; i++) {
283 unsigned long startaddr, endaddr;
284
285 startaddr = freep[i].kve_start;
286 endaddr = freep[i].kve_end;
287 if (h2g_valid(startaddr)) {
288 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
289
290 if (h2g_valid(endaddr)) {
291 endaddr = h2g(endaddr);
fd436907 292 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
293 } else {
294#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
295 endaddr = ~0ul;
fd436907 296 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
297#endif
298 }
299 }
300 }
301 free(freep);
302 mmap_unlock();
303 }
304#else
50a9569b 305 FILE *f;
50a9569b 306
0776590d 307 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 308
fd436907 309 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 310 if (f) {
5cd2c5b6
RH
311 mmap_lock();
312
50a9569b 313 do {
5cd2c5b6
RH
314 unsigned long startaddr, endaddr;
315 int n;
316
317 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
318
319 if (n == 2 && h2g_valid(startaddr)) {
320 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
321
322 if (h2g_valid(endaddr)) {
323 endaddr = h2g(endaddr);
324 } else {
325 endaddr = ~0ul;
326 }
327 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
328 }
329 } while (!feof(f));
5cd2c5b6 330
50a9569b 331 fclose(f);
5cd2c5b6 332 mmap_unlock();
50a9569b 333 }
f01576f1 334#endif
50a9569b
AZ
335 }
336#endif
54936004
FB
337}
338
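/* For example (illustrative): on a host with 4096-byte pages and a target
   with TARGET_PAGE_SIZE == 4096, page_init() above ends up with
   qemu_host_page_bits == 12 and qemu_host_page_mask == ~0xfffUL; if the
   host page size were smaller than TARGET_PAGE_SIZE it is first rounded
   up to TARGET_PAGE_SIZE before the bit count is computed. */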
41c1b1c9 339static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 340{
41c1b1c9
PB
341 PageDesc *pd;
342 void **lp;
343 int i;
344
5cd2c5b6 345#if defined(CONFIG_USER_ONLY)
2e9a5713 346 /* We can't use qemu_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
347# define ALLOC(P, SIZE) \
348 do { \
349 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
350 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
351 } while (0)
352#else
353# define ALLOC(P, SIZE) \
354 do { P = qemu_mallocz(SIZE); } while (0)
17e2377a 355#endif
434929bf 356
5cd2c5b6
RH
357 /* Level 1. Always allocated. */
358 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
359
360 /* Level 2..N-1. */
361 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
362 void **p = *lp;
363
364 if (p == NULL) {
365 if (!alloc) {
366 return NULL;
367 }
368 ALLOC(p, sizeof(void *) * L2_SIZE);
369 *lp = p;
17e2377a 370 }
5cd2c5b6
RH
371
372 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
373 }
374
375 pd = *lp;
376 if (pd == NULL) {
377 if (!alloc) {
378 return NULL;
379 }
380 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
381 *lp = pd;
54936004 382 }
5cd2c5b6
RH
383
384#undef ALLOC
5cd2c5b6
RH
385
386 return pd + (index & (L2_SIZE - 1));
54936004
FB
387}
388
41c1b1c9 389static inline PageDesc *page_find(tb_page_addr_t index)
54936004 390{
5cd2c5b6 391 return page_find_alloc(index, 0);
fd6ce8f6
FB
392}
393
6d9a1304 394#if !defined(CONFIG_USER_ONLY)
c227f099 395static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 396{
e3f4e2a4 397 PhysPageDesc *pd;
5cd2c5b6
RH
398 void **lp;
399 int i;
92e873b9 400
5cd2c5b6
RH
401 /* Level 1. Always allocated. */
402 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 403
5cd2c5b6
RH
404 /* Level 2..N-1. */
405 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
406 void **p = *lp;
407 if (p == NULL) {
408 if (!alloc) {
409 return NULL;
410 }
411 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
412 }
413 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 414 }
5cd2c5b6 415
e3f4e2a4 416 pd = *lp;
5cd2c5b6 417 if (pd == NULL) {
e3f4e2a4 418 int i;
5cd2c5b6
RH
419
420 if (!alloc) {
108c49b8 421 return NULL;
5cd2c5b6
RH
422 }
423
424 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
425
67c4d23c 426 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6
RH
427 pd[i].phys_offset = IO_MEM_UNASSIGNED;
428 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
67c4d23c 429 }
92e873b9 430 }
5cd2c5b6
RH
431
432 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
433}
434
c227f099 435static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 436{
108c49b8 437 return phys_page_find_alloc(index, 0);
92e873b9
FB
438}
439
c227f099
AL
440static void tlb_protect_code(ram_addr_t ram_addr);
441static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 442 target_ulong vaddr);
c8a706fe
PB
443#define mmap_lock() do { } while(0)
444#define mmap_unlock() do { } while(0)
9fa3e853 445#endif
fd6ce8f6 446
4369415f
FB
447#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448
449#if defined(CONFIG_USER_ONLY)
ccbb4d44 450/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
 451 user mode. This will change when a dedicated libc is used. */
452#define USE_STATIC_CODE_GEN_BUFFER
453#endif
454
455#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
456static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
458#endif
459
8fcd3692 460static void code_gen_alloc(unsigned long tb_size)
26a5f13b 461{
4369415f
FB
462#ifdef USE_STATIC_CODE_GEN_BUFFER
463 code_gen_buffer = static_code_gen_buffer;
464 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
465 map_exec(code_gen_buffer, code_gen_buffer_size);
466#else
26a5f13b
FB
467 code_gen_buffer_size = tb_size;
468 if (code_gen_buffer_size == 0) {
4369415f
FB
469#if defined(CONFIG_USER_ONLY)
470 /* in user mode, phys_ram_size is not meaningful */
471 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472#else
ccbb4d44 473 /* XXX: needs adjustments */
94a6b54f 474 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 475#endif
26a5f13b
FB
476 }
477 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
478 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
479 /* The code gen buffer location may have constraints depending on
480 the host cpu and OS */
481#if defined(__linux__)
482 {
483 int flags;
141ac468
BS
484 void *start = NULL;
485
26a5f13b
FB
486 flags = MAP_PRIVATE | MAP_ANONYMOUS;
487#if defined(__x86_64__)
488 flags |= MAP_32BIT;
489 /* Cannot map more than that */
490 if (code_gen_buffer_size > (800 * 1024 * 1024))
491 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
492#elif defined(__sparc_v9__)
493 // Map the buffer below 2G, so we can use direct calls and branches
494 flags |= MAP_FIXED;
495 start = (void *) 0x60000000UL;
496 if (code_gen_buffer_size > (512 * 1024 * 1024))
497 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 498#elif defined(__arm__)
63d41246 499 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
500 flags |= MAP_FIXED;
501 start = (void *) 0x01000000UL;
502 if (code_gen_buffer_size > 16 * 1024 * 1024)
503 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
504#elif defined(__s390x__)
505 /* Map the buffer so that we can use direct calls and branches. */
506 /* We have a +- 4GB range on the branches; leave some slop. */
507 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
508 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
509 }
510 start = (void *)0x90000000UL;
26a5f13b 511#endif
141ac468
BS
512 code_gen_buffer = mmap(start, code_gen_buffer_size,
513 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
514 flags, -1, 0);
515 if (code_gen_buffer == MAP_FAILED) {
516 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
517 exit(1);
518 }
519 }
cbb608a5
B
520#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
521 || defined(__DragonFly__) || defined(__OpenBSD__)
06e67a82
AL
522 {
523 int flags;
524 void *addr = NULL;
525 flags = MAP_PRIVATE | MAP_ANONYMOUS;
526#if defined(__x86_64__)
527 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
528 * 0x40000000 is free */
529 flags |= MAP_FIXED;
530 addr = (void *)0x40000000;
531 /* Cannot map more than that */
532 if (code_gen_buffer_size > (800 * 1024 * 1024))
533 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
534#elif defined(__sparc_v9__)
535 // Map the buffer below 2G, so we can use direct calls and branches
536 flags |= MAP_FIXED;
537 addr = (void *) 0x60000000UL;
538 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
539 code_gen_buffer_size = (512 * 1024 * 1024);
540 }
06e67a82
AL
541#endif
542 code_gen_buffer = mmap(addr, code_gen_buffer_size,
543 PROT_WRITE | PROT_READ | PROT_EXEC,
544 flags, -1, 0);
545 if (code_gen_buffer == MAP_FAILED) {
546 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
547 exit(1);
548 }
549 }
26a5f13b
FB
550#else
551 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b
FB
552 map_exec(code_gen_buffer, code_gen_buffer_size);
553#endif
4369415f 554#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
555 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
556 code_gen_buffer_max_size = code_gen_buffer_size -
239fda31 557 (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
26a5f13b
FB
558 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
559 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
560}
561
562/* Must be called before using the QEMU cpus. 'tb_size' is the size
563 (in bytes) allocated to the translation buffer. Zero means default
564 size. */
565void cpu_exec_init_all(unsigned long tb_size)
566{
26a5f13b
FB
567 cpu_gen_init();
568 code_gen_alloc(tb_size);
569 code_gen_ptr = code_gen_buffer;
4369415f 570 page_init();
e2eef170 571#if !defined(CONFIG_USER_ONLY)
26a5f13b 572 io_mem_init();
e2eef170 573#endif
9002ec79
RH
574#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
575 /* There's no guest base to take into account, so go ahead and
576 initialize the prologue now. */
577 tcg_prologue_init(&tcg_ctx);
578#endif
26a5f13b
FB
579}
580
9656f324
PB
581#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
582
e59fb374 583static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
584{
585 CPUState *env = opaque;
9656f324 586
3098dba0
AJ
587 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
588 version_id is increased. */
589 env->interrupt_request &= ~0x01;
9656f324
PB
590 tlb_flush(env, 1);
591
592 return 0;
593}
e7f4eff7
JQ
594
595static const VMStateDescription vmstate_cpu_common = {
596 .name = "cpu_common",
597 .version_id = 1,
598 .minimum_version_id = 1,
599 .minimum_version_id_old = 1,
e7f4eff7
JQ
600 .post_load = cpu_common_post_load,
601 .fields = (VMStateField []) {
602 VMSTATE_UINT32(halted, CPUState),
603 VMSTATE_UINT32(interrupt_request, CPUState),
604 VMSTATE_END_OF_LIST()
605 }
606};
9656f324
PB
607#endif
608
950f1472
GC
609CPUState *qemu_get_cpu(int cpu)
610{
611 CPUState *env = first_cpu;
612
613 while (env) {
614 if (env->cpu_index == cpu)
615 break;
616 env = env->next_cpu;
617 }
618
619 return env;
620}
621
6a00d601 622void cpu_exec_init(CPUState *env)
fd6ce8f6 623{
6a00d601
FB
624 CPUState **penv;
625 int cpu_index;
626
c2764719
PB
627#if defined(CONFIG_USER_ONLY)
628 cpu_list_lock();
629#endif
6a00d601
FB
630 env->next_cpu = NULL;
631 penv = &first_cpu;
632 cpu_index = 0;
633 while (*penv != NULL) {
1e9fa730 634 penv = &(*penv)->next_cpu;
6a00d601
FB
635 cpu_index++;
636 }
637 env->cpu_index = cpu_index;
268a362c 638 env->numa_node = 0;
72cf2d4f
BS
639 QTAILQ_INIT(&env->breakpoints);
640 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
641#ifndef CONFIG_USER_ONLY
642 env->thread_id = qemu_get_thread_id();
643#endif
6a00d601 644 *penv = env;
c2764719
PB
645#if defined(CONFIG_USER_ONLY)
646 cpu_list_unlock();
647#endif
b3c7724c 648#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
649 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
650 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
651 cpu_save, cpu_load, env);
652#endif
fd6ce8f6
FB
653}
654
d1a1eb74
TG
 655/* Allocate a new translation block. Returns NULL (so that the caller
 656 can flush) if there are too many translation blocks or too much generated code. */
657static TranslationBlock *tb_alloc(target_ulong pc)
658{
659 TranslationBlock *tb;
660
661 if (nb_tbs >= code_gen_max_blocks ||
662 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
663 return NULL;
664 tb = &tbs[nb_tbs++];
665 tb->pc = pc;
666 tb->cflags = 0;
667 return tb;
668}
669
670void tb_free(TranslationBlock *tb)
671{
 672 /* In practice this is mostly used for single-use temporary TBs.
673 Ignore the hard cases and just back up if this TB happens to
674 be the last one generated. */
675 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
676 code_gen_ptr = tb->tc_ptr;
677 nb_tbs--;
678 }
679}
680
9fa3e853
FB
681static inline void invalidate_page_bitmap(PageDesc *p)
682{
683 if (p->code_bitmap) {
59817ccb 684 qemu_free(p->code_bitmap);
9fa3e853
FB
685 p->code_bitmap = NULL;
686 }
687 p->code_write_count = 0;
688}
689
5cd2c5b6
RH
690/* Set to NULL all the 'first_tb' fields in all PageDescs. */
691
692static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 693{
5cd2c5b6 694 int i;
fd6ce8f6 695
5cd2c5b6
RH
696 if (*lp == NULL) {
697 return;
698 }
699 if (level == 0) {
700 PageDesc *pd = *lp;
7296abac 701 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
702 pd[i].first_tb = NULL;
703 invalidate_page_bitmap(pd + i);
fd6ce8f6 704 }
5cd2c5b6
RH
705 } else {
706 void **pp = *lp;
7296abac 707 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
708 page_flush_tb_1 (level - 1, pp + i);
709 }
710 }
711}
712
713static void page_flush_tb(void)
714{
715 int i;
716 for (i = 0; i < V_L1_SIZE; i++) {
717 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
718 }
719}
720
721/* flush all the translation blocks */
d4e8164f 722/* XXX: tb_flush is currently not thread safe */
6a00d601 723void tb_flush(CPUState *env1)
fd6ce8f6 724{
6a00d601 725 CPUState *env;
0124311e 726#if defined(DEBUG_FLUSH)
ab3d1727
BS
727 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
728 (unsigned long)(code_gen_ptr - code_gen_buffer),
729 nb_tbs, nb_tbs > 0 ?
730 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 731#endif
26a5f13b 732 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
733 cpu_abort(env1, "Internal error: code buffer overflow\n");
734
fd6ce8f6 735 nb_tbs = 0;
3b46e624 736
6a00d601
FB
737 for(env = first_cpu; env != NULL; env = env->next_cpu) {
738 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
739 }
9fa3e853 740
8a8a608f 741 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 742 page_flush_tb();
9fa3e853 743
fd6ce8f6 744 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
745 /* XXX: flush processor icache at this point if cache flush is
746 expensive */
e3db7226 747 tb_flush_count++;
fd6ce8f6
FB
748}
749
750#ifdef DEBUG_TB_CHECK
751
bc98a7ef 752static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
753{
754 TranslationBlock *tb;
755 int i;
756 address &= TARGET_PAGE_MASK;
99773bd4
PB
757 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
758 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
759 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
760 address >= tb->pc + tb->size)) {
0bf9e31a
BS
761 printf("ERROR invalidate: address=" TARGET_FMT_lx
762 " PC=%08lx size=%04x\n",
99773bd4 763 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
764 }
765 }
766 }
767}
768
769/* verify that all the pages have correct rights for code */
770static void tb_page_check(void)
771{
772 TranslationBlock *tb;
773 int i, flags1, flags2;
3b46e624 774
99773bd4
PB
775 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
776 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
777 flags1 = page_get_flags(tb->pc);
778 flags2 = page_get_flags(tb->pc + tb->size - 1);
779 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
780 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 781 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
782 }
783 }
784 }
785}
786
787#endif
788
789/* invalidate one TB */
790static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
791 int next_offset)
792{
793 TranslationBlock *tb1;
794 for(;;) {
795 tb1 = *ptb;
796 if (tb1 == tb) {
797 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
798 break;
799 }
800 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
801 }
802}
803
9fa3e853
FB
804static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
805{
806 TranslationBlock *tb1;
807 unsigned int n1;
808
809 for(;;) {
810 tb1 = *ptb;
811 n1 = (long)tb1 & 3;
812 tb1 = (TranslationBlock *)((long)tb1 & ~3);
813 if (tb1 == tb) {
814 *ptb = tb1->page_next[n1];
815 break;
816 }
817 ptb = &tb1->page_next[n1];
818 }
819}
820
d4e8164f
FB
821static inline void tb_jmp_remove(TranslationBlock *tb, int n)
822{
823 TranslationBlock *tb1, **ptb;
824 unsigned int n1;
825
826 ptb = &tb->jmp_next[n];
827 tb1 = *ptb;
828 if (tb1) {
829 /* find tb(n) in circular list */
830 for(;;) {
831 tb1 = *ptb;
832 n1 = (long)tb1 & 3;
833 tb1 = (TranslationBlock *)((long)tb1 & ~3);
834 if (n1 == n && tb1 == tb)
835 break;
836 if (n1 == 2) {
837 ptb = &tb1->jmp_first;
838 } else {
839 ptb = &tb1->jmp_next[n1];
840 }
841 }
842 /* now we can suppress tb(n) from the list */
843 *ptb = tb->jmp_next[n];
844
845 tb->jmp_next[n] = NULL;
846 }
847}
848
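/* A note on the pointer tagging used in the list walks above and below:
   page_next[], jmp_first and jmp_next[] store a TranslationBlock pointer
   with a small tag in its two low bits ((long)tb & 3).  In the per-page
   TB lists the tag is the slot n (0 or 1) that the page occupies in the
   TB; in the jump lists the value 2 marks the list head, i.e. the
   destination TB itself (tb->jmp_first = (TranslationBlock *)((long)tb | 2)),
   which is how the loops detect the end of the circular list.  This
   assumes TranslationBlock structures are at least 4-byte aligned. */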
849/* reset the jump entry 'n' of a TB so that it is not chained to
850 another TB */
851static inline void tb_reset_jump(TranslationBlock *tb, int n)
852{
853 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
854}
855
41c1b1c9 856void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 857{
6a00d601 858 CPUState *env;
8a40a180 859 PageDesc *p;
d4e8164f 860 unsigned int h, n1;
41c1b1c9 861 tb_page_addr_t phys_pc;
8a40a180 862 TranslationBlock *tb1, *tb2;
3b46e624 863
8a40a180
FB
864 /* remove the TB from the hash list */
865 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
866 h = tb_phys_hash_func(phys_pc);
5fafdf24 867 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
868 offsetof(TranslationBlock, phys_hash_next));
869
870 /* remove the TB from the page list */
871 if (tb->page_addr[0] != page_addr) {
872 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
873 tb_page_remove(&p->first_tb, tb);
874 invalidate_page_bitmap(p);
875 }
876 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
877 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
878 tb_page_remove(&p->first_tb, tb);
879 invalidate_page_bitmap(p);
880 }
881
36bdbe54 882 tb_invalidated_flag = 1;
59817ccb 883
fd6ce8f6 884 /* remove the TB from the hash list */
8a40a180 885 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
886 for(env = first_cpu; env != NULL; env = env->next_cpu) {
887 if (env->tb_jmp_cache[h] == tb)
888 env->tb_jmp_cache[h] = NULL;
889 }
d4e8164f
FB
890
891 /* suppress this TB from the two jump lists */
892 tb_jmp_remove(tb, 0);
893 tb_jmp_remove(tb, 1);
894
895 /* suppress any remaining jumps to this TB */
896 tb1 = tb->jmp_first;
897 for(;;) {
898 n1 = (long)tb1 & 3;
899 if (n1 == 2)
900 break;
901 tb1 = (TranslationBlock *)((long)tb1 & ~3);
902 tb2 = tb1->jmp_next[n1];
903 tb_reset_jump(tb1, n1);
904 tb1->jmp_next[n1] = NULL;
905 tb1 = tb2;
906 }
907 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 908
e3db7226 909 tb_phys_invalidate_count++;
9fa3e853
FB
910}
911
912static inline void set_bits(uint8_t *tab, int start, int len)
913{
914 int end, mask, end1;
915
916 end = start + len;
917 tab += start >> 3;
918 mask = 0xff << (start & 7);
919 if ((start & ~7) == (end & ~7)) {
920 if (start < end) {
921 mask &= ~(0xff << (end & 7));
922 *tab |= mask;
923 }
924 } else {
925 *tab++ |= mask;
926 start = (start + 8) & ~7;
927 end1 = end & ~7;
928 while (start < end1) {
929 *tab++ = 0xff;
930 start += 8;
931 }
932 if (start < end) {
933 mask = ~(0xff << (end & 7));
934 *tab |= mask;
935 }
936 }
937}
938
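/* Worked example (illustrative only): set_bits(tab, 3, 7) marks bits
   3..9.  With start = 3, len = 7, end = 10, the first partial byte gets
   mask 0xff << 3 = 0xf8 (bits 3-7), there is no full middle byte, and
   the trailing partial byte gets ~(0xff << (10 & 7)) = 0x03 in its low
   bits (bits 8-9), so tab[0] |= 0xf8 and tab[1] |= 0x03.
   build_page_bitmap() below relies on this to mark which bytes of a page
   are covered by translated code. */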
939static void build_page_bitmap(PageDesc *p)
940{
941 int n, tb_start, tb_end;
942 TranslationBlock *tb;
3b46e624 943
b2a7081a 944 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
945
946 tb = p->first_tb;
947 while (tb != NULL) {
948 n = (long)tb & 3;
949 tb = (TranslationBlock *)((long)tb & ~3);
950 /* NOTE: this is subtle as a TB may span two physical pages */
951 if (n == 0) {
952 /* NOTE: tb_end may be after the end of the page, but
953 it is not a problem */
954 tb_start = tb->pc & ~TARGET_PAGE_MASK;
955 tb_end = tb_start + tb->size;
956 if (tb_end > TARGET_PAGE_SIZE)
957 tb_end = TARGET_PAGE_SIZE;
958 } else {
959 tb_start = 0;
960 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
961 }
962 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
963 tb = tb->page_next[n];
964 }
965}
966
2e70f6ef
PB
967TranslationBlock *tb_gen_code(CPUState *env,
968 target_ulong pc, target_ulong cs_base,
969 int flags, int cflags)
d720b93d
FB
970{
971 TranslationBlock *tb;
972 uint8_t *tc_ptr;
41c1b1c9
PB
973 tb_page_addr_t phys_pc, phys_page2;
974 target_ulong virt_page2;
d720b93d
FB
975 int code_gen_size;
976
41c1b1c9 977 phys_pc = get_page_addr_code(env, pc);
c27004ec 978 tb = tb_alloc(pc);
d720b93d
FB
979 if (!tb) {
980 /* flush must be done */
981 tb_flush(env);
982 /* cannot fail at this point */
c27004ec 983 tb = tb_alloc(pc);
2e70f6ef
PB
984 /* Don't forget to invalidate previous TB info. */
985 tb_invalidated_flag = 1;
d720b93d
FB
986 }
987 tc_ptr = code_gen_ptr;
988 tb->tc_ptr = tc_ptr;
989 tb->cs_base = cs_base;
990 tb->flags = flags;
991 tb->cflags = cflags;
d07bde88 992 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 993 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 994
d720b93d 995 /* check next page if needed */
c27004ec 996 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 997 phys_page2 = -1;
c27004ec 998 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 999 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1000 }
41c1b1c9 1001 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1002 return tb;
d720b93d 1003}
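/* For instance (an illustrative sketch assuming 4 KB target pages): a TB
   whose code starts at pc = 0x00400ff0 and is 0x20 bytes long ends at
   0x0040100f, so virt_page2 = 0x00401000 is a different page from pc's
   and phys_page2 is looked up as well; tb_link_page() then registers the
   TB on both physical pages, so a write to either page invalidates it. */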
3b46e624 1004
9fa3e853
FB
 1005/* invalidate all TBs which intersect with the target physical address
 1006 range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1007 the same physical page. 'is_cpu_write_access' should be true if called
1008 from a real cpu write access: the virtual CPU will exit the current
1009 TB if code is modified inside this TB. */
41c1b1c9 1010void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1011 int is_cpu_write_access)
1012{
6b917547 1013 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1014 CPUState *env = cpu_single_env;
41c1b1c9 1015 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1016 PageDesc *p;
1017 int n;
1018#ifdef TARGET_HAS_PRECISE_SMC
1019 int current_tb_not_found = is_cpu_write_access;
1020 TranslationBlock *current_tb = NULL;
1021 int current_tb_modified = 0;
1022 target_ulong current_pc = 0;
1023 target_ulong current_cs_base = 0;
1024 int current_flags = 0;
1025#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1026
1027 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1028 if (!p)
9fa3e853 1029 return;
5fafdf24 1030 if (!p->code_bitmap &&
d720b93d
FB
1031 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1032 is_cpu_write_access) {
9fa3e853
FB
1033 /* build code bitmap */
1034 build_page_bitmap(p);
1035 }
1036
1037 /* we remove all the TBs in the range [start, end[ */
1038 /* XXX: see if in some cases it could be faster to invalidate all the code */
1039 tb = p->first_tb;
1040 while (tb != NULL) {
1041 n = (long)tb & 3;
1042 tb = (TranslationBlock *)((long)tb & ~3);
1043 tb_next = tb->page_next[n];
1044 /* NOTE: this is subtle as a TB may span two physical pages */
1045 if (n == 0) {
1046 /* NOTE: tb_end may be after the end of the page, but
1047 it is not a problem */
1048 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1049 tb_end = tb_start + tb->size;
1050 } else {
1051 tb_start = tb->page_addr[1];
1052 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1053 }
1054 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1055#ifdef TARGET_HAS_PRECISE_SMC
1056 if (current_tb_not_found) {
1057 current_tb_not_found = 0;
1058 current_tb = NULL;
2e70f6ef 1059 if (env->mem_io_pc) {
d720b93d 1060 /* now we have a real cpu fault */
2e70f6ef 1061 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1062 }
1063 }
1064 if (current_tb == tb &&
2e70f6ef 1065 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1066 /* If we are modifying the current TB, we must stop
1067 its execution. We could be more precise by checking
1068 that the modification is after the current PC, but it
1069 would require a specialized function to partially
1070 restore the CPU state */
3b46e624 1071
d720b93d 1072 current_tb_modified = 1;
5fafdf24 1073 cpu_restore_state(current_tb, env,
2e70f6ef 1074 env->mem_io_pc, NULL);
6b917547
AL
1075 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1076 &current_flags);
d720b93d
FB
1077 }
1078#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1079 /* we need to do that to handle the case where a signal
1080 occurs while doing tb_phys_invalidate() */
1081 saved_tb = NULL;
1082 if (env) {
1083 saved_tb = env->current_tb;
1084 env->current_tb = NULL;
1085 }
9fa3e853 1086 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1087 if (env) {
1088 env->current_tb = saved_tb;
1089 if (env->interrupt_request && env->current_tb)
1090 cpu_interrupt(env, env->interrupt_request);
1091 }
9fa3e853
FB
1092 }
1093 tb = tb_next;
1094 }
1095#if !defined(CONFIG_USER_ONLY)
1096 /* if no code remaining, no need to continue to use slow writes */
1097 if (!p->first_tb) {
1098 invalidate_page_bitmap(p);
d720b93d 1099 if (is_cpu_write_access) {
2e70f6ef 1100 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1101 }
1102 }
1103#endif
1104#ifdef TARGET_HAS_PRECISE_SMC
1105 if (current_tb_modified) {
1106 /* we generate a block containing just the instruction
1107 modifying the memory. It will ensure that it cannot modify
1108 itself */
ea1c1802 1109 env->current_tb = NULL;
2e70f6ef 1110 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1111 cpu_resume_from_signal(env, NULL);
9fa3e853 1112 }
fd6ce8f6 1113#endif
9fa3e853 1114}
fd6ce8f6 1115
9fa3e853 1116/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1117static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1118{
1119 PageDesc *p;
1120 int offset, b;
59817ccb 1121#if 0
a4193c8a 1122 if (1) {
93fcfe39
AL
1123 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1124 cpu_single_env->mem_io_vaddr, len,
1125 cpu_single_env->eip,
1126 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1127 }
1128#endif
9fa3e853 1129 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1130 if (!p)
9fa3e853
FB
1131 return;
1132 if (p->code_bitmap) {
1133 offset = start & ~TARGET_PAGE_MASK;
1134 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1135 if (b & ((1 << len) - 1))
1136 goto do_invalidate;
1137 } else {
1138 do_invalidate:
d720b93d 1139 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1140 }
1141}
1142
9fa3e853 1143#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1144static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1145 unsigned long pc, void *puc)
9fa3e853 1146{
6b917547 1147 TranslationBlock *tb;
9fa3e853 1148 PageDesc *p;
6b917547 1149 int n;
d720b93d 1150#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1151 TranslationBlock *current_tb = NULL;
d720b93d 1152 CPUState *env = cpu_single_env;
6b917547
AL
1153 int current_tb_modified = 0;
1154 target_ulong current_pc = 0;
1155 target_ulong current_cs_base = 0;
1156 int current_flags = 0;
d720b93d 1157#endif
9fa3e853
FB
1158
1159 addr &= TARGET_PAGE_MASK;
1160 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1161 if (!p)
9fa3e853
FB
1162 return;
1163 tb = p->first_tb;
d720b93d
FB
1164#ifdef TARGET_HAS_PRECISE_SMC
1165 if (tb && pc != 0) {
1166 current_tb = tb_find_pc(pc);
1167 }
1168#endif
9fa3e853
FB
1169 while (tb != NULL) {
1170 n = (long)tb & 3;
1171 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1172#ifdef TARGET_HAS_PRECISE_SMC
1173 if (current_tb == tb &&
2e70f6ef 1174 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1175 /* If we are modifying the current TB, we must stop
1176 its execution. We could be more precise by checking
1177 that the modification is after the current PC, but it
1178 would require a specialized function to partially
1179 restore the CPU state */
3b46e624 1180
d720b93d
FB
1181 current_tb_modified = 1;
1182 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1183 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1184 &current_flags);
d720b93d
FB
1185 }
1186#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1187 tb_phys_invalidate(tb, addr);
1188 tb = tb->page_next[n];
1189 }
fd6ce8f6 1190 p->first_tb = NULL;
d720b93d
FB
1191#ifdef TARGET_HAS_PRECISE_SMC
1192 if (current_tb_modified) {
1193 /* we generate a block containing just the instruction
1194 modifying the memory. It will ensure that it cannot modify
1195 itself */
ea1c1802 1196 env->current_tb = NULL;
2e70f6ef 1197 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1198 cpu_resume_from_signal(env, puc);
1199 }
1200#endif
fd6ce8f6 1201}
9fa3e853 1202#endif
fd6ce8f6
FB
1203
 1204/* add the tb to the target page and protect it if necessary */
5fafdf24 1205static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1206 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1207{
1208 PageDesc *p;
9fa3e853
FB
1209 TranslationBlock *last_first_tb;
1210
1211 tb->page_addr[n] = page_addr;
5cd2c5b6 1212 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853
FB
1213 tb->page_next[n] = p->first_tb;
1214 last_first_tb = p->first_tb;
1215 p->first_tb = (TranslationBlock *)((long)tb | n);
1216 invalidate_page_bitmap(p);
fd6ce8f6 1217
107db443 1218#if defined(TARGET_HAS_SMC) || 1
d720b93d 1219
9fa3e853 1220#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1221 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1222 target_ulong addr;
1223 PageDesc *p2;
9fa3e853
FB
1224 int prot;
1225
fd6ce8f6
FB
1226 /* force the host page as non writable (writes will have a
1227 page fault + mprotect overhead) */
53a5960a 1228 page_addr &= qemu_host_page_mask;
fd6ce8f6 1229 prot = 0;
53a5960a
PB
1230 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1231 addr += TARGET_PAGE_SIZE) {
1232
1233 p2 = page_find (addr >> TARGET_PAGE_BITS);
1234 if (!p2)
1235 continue;
1236 prot |= p2->flags;
1237 p2->flags &= ~PAGE_WRITE;
53a5960a 1238 }
5fafdf24 1239 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1240 (prot & PAGE_BITS) & ~PAGE_WRITE);
1241#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1242 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1243 page_addr);
fd6ce8f6 1244#endif
fd6ce8f6 1245 }
9fa3e853
FB
1246#else
1247 /* if some code is already present, then the pages are already
1248 protected. So we handle the case where only the first TB is
1249 allocated in a physical page */
1250 if (!last_first_tb) {
6a00d601 1251 tlb_protect_code(page_addr);
9fa3e853
FB
1252 }
1253#endif
d720b93d
FB
1254
1255#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1256}
1257
9fa3e853
FB
1258/* add a new TB and link it to the physical page tables. phys_page2 is
1259 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1260void tb_link_page(TranslationBlock *tb,
1261 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1262{
9fa3e853
FB
1263 unsigned int h;
1264 TranslationBlock **ptb;
1265
c8a706fe
PB
1266 /* Grab the mmap lock to stop another thread invalidating this TB
1267 before we are done. */
1268 mmap_lock();
9fa3e853
FB
1269 /* add in the physical hash table */
1270 h = tb_phys_hash_func(phys_pc);
1271 ptb = &tb_phys_hash[h];
1272 tb->phys_hash_next = *ptb;
1273 *ptb = tb;
fd6ce8f6
FB
1274
1275 /* add in the page list */
9fa3e853
FB
1276 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1277 if (phys_page2 != -1)
1278 tb_alloc_page(tb, 1, phys_page2);
1279 else
1280 tb->page_addr[1] = -1;
9fa3e853 1281
d4e8164f
FB
1282 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1283 tb->jmp_next[0] = NULL;
1284 tb->jmp_next[1] = NULL;
1285
1286 /* init original jump addresses */
1287 if (tb->tb_next_offset[0] != 0xffff)
1288 tb_reset_jump(tb, 0);
1289 if (tb->tb_next_offset[1] != 0xffff)
1290 tb_reset_jump(tb, 1);
8a40a180
FB
1291
1292#ifdef DEBUG_TB_CHECK
1293 tb_page_check();
1294#endif
c8a706fe 1295 mmap_unlock();
fd6ce8f6
FB
1296}
1297
9fa3e853
FB
1298/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1299 tb[1].tc_ptr. Return NULL if not found */
1300TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1301{
9fa3e853
FB
1302 int m_min, m_max, m;
1303 unsigned long v;
1304 TranslationBlock *tb;
a513fe19
FB
1305
1306 if (nb_tbs <= 0)
1307 return NULL;
1308 if (tc_ptr < (unsigned long)code_gen_buffer ||
1309 tc_ptr >= (unsigned long)code_gen_ptr)
1310 return NULL;
1311 /* binary search (cf Knuth) */
1312 m_min = 0;
1313 m_max = nb_tbs - 1;
1314 while (m_min <= m_max) {
1315 m = (m_min + m_max) >> 1;
1316 tb = &tbs[m];
1317 v = (unsigned long)tb->tc_ptr;
1318 if (v == tc_ptr)
1319 return tb;
1320 else if (tc_ptr < v) {
1321 m_max = m - 1;
1322 } else {
1323 m_min = m + 1;
1324 }
5fafdf24 1325 }
a513fe19
FB
1326 return &tbs[m_max];
1327}
7501267e 1328
ea041c0e
FB
1329static void tb_reset_jump_recursive(TranslationBlock *tb);
1330
1331static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1332{
1333 TranslationBlock *tb1, *tb_next, **ptb;
1334 unsigned int n1;
1335
1336 tb1 = tb->jmp_next[n];
1337 if (tb1 != NULL) {
1338 /* find head of list */
1339 for(;;) {
1340 n1 = (long)tb1 & 3;
1341 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1342 if (n1 == 2)
1343 break;
1344 tb1 = tb1->jmp_next[n1];
1345 }
 1346 /* we are now sure that tb jumps to tb1 */
1347 tb_next = tb1;
1348
1349 /* remove tb from the jmp_first list */
1350 ptb = &tb_next->jmp_first;
1351 for(;;) {
1352 tb1 = *ptb;
1353 n1 = (long)tb1 & 3;
1354 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1355 if (n1 == n && tb1 == tb)
1356 break;
1357 ptb = &tb1->jmp_next[n1];
1358 }
1359 *ptb = tb->jmp_next[n];
1360 tb->jmp_next[n] = NULL;
3b46e624 1361
ea041c0e
FB
1362 /* suppress the jump to next tb in generated code */
1363 tb_reset_jump(tb, n);
1364
0124311e 1365 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1366 tb_reset_jump_recursive(tb_next);
1367 }
1368}
1369
1370static void tb_reset_jump_recursive(TranslationBlock *tb)
1371{
1372 tb_reset_jump_recursive2(tb, 0);
1373 tb_reset_jump_recursive2(tb, 1);
1374}
1375
1fddef4b 1376#if defined(TARGET_HAS_ICE)
94df27fd
PB
1377#if defined(CONFIG_USER_ONLY)
1378static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1379{
1380 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1381}
1382#else
d720b93d
FB
1383static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1384{
c227f099 1385 target_phys_addr_t addr;
9b3c35e0 1386 target_ulong pd;
c227f099 1387 ram_addr_t ram_addr;
c2f07f81 1388 PhysPageDesc *p;
d720b93d 1389
c2f07f81
PB
1390 addr = cpu_get_phys_page_debug(env, pc);
1391 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1392 if (!p) {
1393 pd = IO_MEM_UNASSIGNED;
1394 } else {
1395 pd = p->phys_offset;
1396 }
1397 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1398 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1399}
c27004ec 1400#endif
94df27fd 1401#endif /* TARGET_HAS_ICE */
d720b93d 1402
c527ee8f
PB
1403#if defined(CONFIG_USER_ONLY)
1404void cpu_watchpoint_remove_all(CPUState *env, int mask)
1405
1406{
1407}
1408
1409int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1410 int flags, CPUWatchpoint **watchpoint)
1411{
1412 return -ENOSYS;
1413}
1414#else
6658ffb8 1415/* Add a watchpoint. */
a1d1bb31
AL
1416int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1417 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1418{
b4051334 1419 target_ulong len_mask = ~(len - 1);
c0ce998e 1420 CPUWatchpoint *wp;
6658ffb8 1421
b4051334
AL
1422 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1423 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1424 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1425 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1426 return -EINVAL;
1427 }
a1d1bb31 1428 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1429
1430 wp->vaddr = addr;
b4051334 1431 wp->len_mask = len_mask;
a1d1bb31
AL
1432 wp->flags = flags;
1433
2dc9f411 1434 /* keep all GDB-injected watchpoints in front */
c0ce998e 1435 if (flags & BP_GDB)
72cf2d4f 1436 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1437 else
72cf2d4f 1438 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1439
6658ffb8 1440 tlb_flush_page(env, addr);
a1d1bb31
AL
1441
1442 if (watchpoint)
1443 *watchpoint = wp;
1444 return 0;
6658ffb8
PB
1445}
1446
a1d1bb31
AL
1447/* Remove a specific watchpoint. */
1448int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1449 int flags)
6658ffb8 1450{
b4051334 1451 target_ulong len_mask = ~(len - 1);
a1d1bb31 1452 CPUWatchpoint *wp;
6658ffb8 1453
72cf2d4f 1454 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1455 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1456 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1457 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1458 return 0;
1459 }
1460 }
a1d1bb31 1461 return -ENOENT;
6658ffb8
PB
1462}
1463
a1d1bb31
AL
1464/* Remove a specific watchpoint by reference. */
1465void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1466{
72cf2d4f 1467 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1468
a1d1bb31
AL
1469 tlb_flush_page(env, watchpoint->vaddr);
1470
1471 qemu_free(watchpoint);
1472}
1473
1474/* Remove all matching watchpoints. */
1475void cpu_watchpoint_remove_all(CPUState *env, int mask)
1476{
c0ce998e 1477 CPUWatchpoint *wp, *next;
a1d1bb31 1478
72cf2d4f 1479 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1480 if (wp->flags & mask)
1481 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1482 }
7d03f82f 1483}
c527ee8f 1484#endif
7d03f82f 1485
a1d1bb31
AL
1486/* Add a breakpoint. */
1487int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1488 CPUBreakpoint **breakpoint)
4c3a88a2 1489{
1fddef4b 1490#if defined(TARGET_HAS_ICE)
c0ce998e 1491 CPUBreakpoint *bp;
3b46e624 1492
a1d1bb31 1493 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1494
a1d1bb31
AL
1495 bp->pc = pc;
1496 bp->flags = flags;
1497
2dc9f411 1498 /* keep all GDB-injected breakpoints in front */
c0ce998e 1499 if (flags & BP_GDB)
72cf2d4f 1500 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1501 else
72cf2d4f 1502 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1503
d720b93d 1504 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1505
1506 if (breakpoint)
1507 *breakpoint = bp;
4c3a88a2
FB
1508 return 0;
1509#else
a1d1bb31 1510 return -ENOSYS;
4c3a88a2
FB
1511#endif
1512}
1513
a1d1bb31
AL
1514/* Remove a specific breakpoint. */
1515int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1516{
7d03f82f 1517#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1518 CPUBreakpoint *bp;
1519
72cf2d4f 1520 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1521 if (bp->pc == pc && bp->flags == flags) {
1522 cpu_breakpoint_remove_by_ref(env, bp);
1523 return 0;
1524 }
7d03f82f 1525 }
a1d1bb31
AL
1526 return -ENOENT;
1527#else
1528 return -ENOSYS;
7d03f82f
EI
1529#endif
1530}
1531
a1d1bb31
AL
1532/* Remove a specific breakpoint by reference. */
1533void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1534{
1fddef4b 1535#if defined(TARGET_HAS_ICE)
72cf2d4f 1536 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1537
a1d1bb31
AL
1538 breakpoint_invalidate(env, breakpoint->pc);
1539
1540 qemu_free(breakpoint);
1541#endif
1542}
1543
1544/* Remove all matching breakpoints. */
1545void cpu_breakpoint_remove_all(CPUState *env, int mask)
1546{
1547#if defined(TARGET_HAS_ICE)
c0ce998e 1548 CPUBreakpoint *bp, *next;
a1d1bb31 1549
72cf2d4f 1550 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1551 if (bp->flags & mask)
1552 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1553 }
4c3a88a2
FB
1554#endif
1555}
1556
c33a346e
FB
1557/* enable or disable single step mode. EXCP_DEBUG is returned by the
1558 CPU loop after each instruction */
1559void cpu_single_step(CPUState *env, int enabled)
1560{
1fddef4b 1561#if defined(TARGET_HAS_ICE)
c33a346e
FB
1562 if (env->singlestep_enabled != enabled) {
1563 env->singlestep_enabled = enabled;
e22a25c9
AL
1564 if (kvm_enabled())
1565 kvm_update_guest_debug(env, 0);
1566 else {
ccbb4d44 1567 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1568 /* XXX: only flush what is necessary */
1569 tb_flush(env);
1570 }
c33a346e
FB
1571 }
1572#endif
1573}
1574
34865134
FB
 1575/* enable or disable low level logging */
1576void cpu_set_log(int log_flags)
1577{
1578 loglevel = log_flags;
1579 if (loglevel && !logfile) {
11fcfab4 1580 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1581 if (!logfile) {
1582 perror(logfilename);
1583 _exit(1);
1584 }
9fa3e853
FB
1585#if !defined(CONFIG_SOFTMMU)
1586 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1587 {
b55266b5 1588 static char logfile_buf[4096];
9fa3e853
FB
1589 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1590 }
bf65f53f
FN
1591#elif !defined(_WIN32)
1592 /* Win32 doesn't support line-buffering and requires size >= 2 */
34865134 1593 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1594#endif
e735b91c
PB
1595 log_append = 1;
1596 }
1597 if (!loglevel && logfile) {
1598 fclose(logfile);
1599 logfile = NULL;
34865134
FB
1600 }
1601}
1602
1603void cpu_set_log_filename(const char *filename)
1604{
1605 logfilename = strdup(filename);
e735b91c
PB
1606 if (logfile) {
1607 fclose(logfile);
1608 logfile = NULL;
1609 }
1610 cpu_set_log(loglevel);
34865134 1611}
c33a346e 1612
3098dba0 1613static void cpu_unlink_tb(CPUState *env)
ea041c0e 1614{
3098dba0
AJ
1615 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1616 problem and hope the cpu will stop of its own accord. For userspace
1617 emulation this often isn't actually as bad as it sounds. Often
1618 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1619 TranslationBlock *tb;
c227f099 1620 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1621
cab1b4bd 1622 spin_lock(&interrupt_lock);
3098dba0
AJ
1623 tb = env->current_tb;
1624 /* if the cpu is currently executing code, we must unlink it and
 1625 all the potentially executing TBs */
f76cfe56 1626 if (tb) {
3098dba0
AJ
1627 env->current_tb = NULL;
1628 tb_reset_jump_recursive(tb);
be214e6c 1629 }
cab1b4bd 1630 spin_unlock(&interrupt_lock);
3098dba0
AJ
1631}
1632
1633/* mask must never be zero, except for A20 change call */
1634void cpu_interrupt(CPUState *env, int mask)
1635{
1636 int old_mask;
be214e6c 1637
2e70f6ef 1638 old_mask = env->interrupt_request;
68a79315 1639 env->interrupt_request |= mask;
3098dba0 1640
8edac960
AL
1641#ifndef CONFIG_USER_ONLY
1642 /*
1643 * If called from iothread context, wake the target cpu in
1644 * case its halted.
1645 */
b7680cb6 1646 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1647 qemu_cpu_kick(env);
1648 return;
1649 }
1650#endif
1651
2e70f6ef 1652 if (use_icount) {
266910c4 1653 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1654#ifndef CONFIG_USER_ONLY
2e70f6ef 1655 if (!can_do_io(env)
be214e6c 1656 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1657 cpu_abort(env, "Raised interrupt while not in I/O function");
1658 }
1659#endif
1660 } else {
3098dba0 1661 cpu_unlink_tb(env);
ea041c0e
FB
1662 }
1663}
1664
b54ad049
FB
1665void cpu_reset_interrupt(CPUState *env, int mask)
1666{
1667 env->interrupt_request &= ~mask;
1668}
1669
3098dba0
AJ
1670void cpu_exit(CPUState *env)
1671{
1672 env->exit_request = 1;
1673 cpu_unlink_tb(env);
1674}
1675
c7cd6a37 1676const CPULogItem cpu_log_items[] = {
5fafdf24 1677 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1678 "show generated host assembly code for each compiled TB" },
1679 { CPU_LOG_TB_IN_ASM, "in_asm",
1680 "show target assembly code for each compiled TB" },
5fafdf24 1681 { CPU_LOG_TB_OP, "op",
57fec1fe 1682 "show micro ops for each compiled TB" },
f193c797 1683 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1684 "show micro ops "
1685#ifdef TARGET_I386
1686 "before eflags optimization and "
f193c797 1687#endif
e01a1157 1688 "after liveness analysis" },
f193c797
FB
1689 { CPU_LOG_INT, "int",
1690 "show interrupts/exceptions in short format" },
1691 { CPU_LOG_EXEC, "exec",
1692 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1693 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1694 "show CPU state before block translation" },
f193c797
FB
1695#ifdef TARGET_I386
1696 { CPU_LOG_PCALL, "pcall",
1697 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1698 { CPU_LOG_RESET, "cpu_reset",
1699 "show CPU state before CPU resets" },
f193c797 1700#endif
8e3a9fd2 1701#ifdef DEBUG_IOPORT
fd872598
FB
1702 { CPU_LOG_IOPORT, "ioport",
1703 "show all i/o ports accesses" },
8e3a9fd2 1704#endif
f193c797
FB
1705 { 0, NULL, NULL },
1706};
1707
f6f3fbca
MT
1708#ifndef CONFIG_USER_ONLY
1709static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 = QLIST_HEAD_INITIALIZER(memory_client_list);
1711
1712static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26
YT
1713 ram_addr_t size,
1714 ram_addr_t phys_offset)
f6f3fbca
MT
1715{
1716 CPUPhysMemoryClient *client;
1717 QLIST_FOREACH(client, &memory_client_list, list) {
1718 client->set_memory(client, start_addr, size, phys_offset);
1719 }
1720}
1721
1722static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1723 target_phys_addr_t end)
f6f3fbca
MT
1724{
1725 CPUPhysMemoryClient *client;
1726 QLIST_FOREACH(client, &memory_client_list, list) {
1727 int r = client->sync_dirty_bitmap(client, start, end);
1728 if (r < 0)
1729 return r;
1730 }
1731 return 0;
1732}
1733
1734static int cpu_notify_migration_log(int enable)
1735{
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->migration_log(client, enable);
1739 if (r < 0)
1740 return r;
1741 }
1742 return 0;
1743}
1744
5cd2c5b6
RH
1745static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1746 int level, void **lp)
f6f3fbca 1747{
5cd2c5b6 1748 int i;
f6f3fbca 1749
5cd2c5b6
RH
1750 if (*lp == NULL) {
1751 return;
1752 }
1753 if (level == 0) {
1754 PhysPageDesc *pd = *lp;
7296abac 1755 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1756 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1757 client->set_memory(client, pd[i].region_offset,
1758 TARGET_PAGE_SIZE, pd[i].phys_offset);
f6f3fbca 1759 }
5cd2c5b6
RH
1760 }
1761 } else {
1762 void **pp = *lp;
7296abac 1763 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1764 phys_page_for_each_1(client, level - 1, pp + i);
f6f3fbca
MT
1765 }
1766 }
1767}
1768
1769static void phys_page_for_each(CPUPhysMemoryClient *client)
1770{
5cd2c5b6
RH
1771 int i;
1772 for (i = 0; i < P_L1_SIZE; ++i) {
1773 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1774 l1_phys_map + 1);
f6f3fbca 1775 }
f6f3fbca
MT
1776}
1777
1778void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1779{
1780 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1781 phys_page_for_each(client);
1782}
1783
1784void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1785{
1786 QLIST_REMOVE(client, list);
1787}
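/* Illustrative sketch (not part of the original file): a minimal
   CPUPhysMemoryClient.  The callback signatures are inferred from the call
   sites above; note that sync_dirty_bitmap and migration_log are invoked
   without NULL checks, so even a trivial client should provide them. */
static void example_client_set_memory(struct CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size, ram_addr_t phys_offset)
{
    /* mirror the new physical mapping into an external model, if needed */
}

static int example_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;
}

static int example_client_migration_log(struct CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;
}

static CPUPhysMemoryClient example_memory_client = {
    .set_memory = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log = example_client_migration_log,
};

/* cpu_register_phys_memory_client(&example_memory_client) replays every
   existing mapping through set_memory before new updates are delivered. */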
1788#endif
1789
f193c797
FB
1790static int cmp1(const char *s1, int n, const char *s2)
1791{
1792 if (strlen(s2) != n)
1793 return 0;
1794 return memcmp(s1, s2, n) == 0;
1795}
3b46e624 1796
f193c797
FB
1797/* takes a comma separated list of log masks. Return 0 if error. */
1798int cpu_str_to_log_mask(const char *str)
1799{
c7cd6a37 1800 const CPULogItem *item;
f193c797
FB
1801 int mask;
1802 const char *p, *p1;
1803
1804 p = str;
1805 mask = 0;
1806 for(;;) {
1807 p1 = strchr(p, ',');
1808 if (!p1)
1809 p1 = p + strlen(p);
9742bf26
YT
1810 if(cmp1(p,p1-p,"all")) {
1811 for(item = cpu_log_items; item->mask != 0; item++) {
1812 mask |= item->mask;
1813 }
1814 } else {
1815 for(item = cpu_log_items; item->mask != 0; item++) {
1816 if (cmp1(p, p1 - p, item->name))
1817 goto found;
1818 }
1819 return 0;
f193c797 1820 }
f193c797
FB
1821 found:
1822 mask |= item->mask;
1823 if (*p1 != ',')
1824 break;
1825 p = p1 + 1;
1826 }
1827 return mask;
1828}
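/* Usage sketch (illustrative only): parsing a "-d"-style option string.
   Assumes cpu_set_log() from this file to apply the resulting mask. */
static void example_set_loglevel(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);      /* e.g. "in_asm,exec" */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}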
ea041c0e 1829
7501267e
FB
1830void cpu_abort(CPUState *env, const char *fmt, ...)
1831{
1832 va_list ap;
493ae1f0 1833 va_list ap2;
7501267e
FB
1834
1835 va_start(ap, fmt);
493ae1f0 1836 va_copy(ap2, ap);
7501267e
FB
1837 fprintf(stderr, "qemu: fatal: ");
1838 vfprintf(stderr, fmt, ap);
1839 fprintf(stderr, "\n");
1840#ifdef TARGET_I386
7fe48483
FB
1841 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1842#else
1843 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1844#endif
93fcfe39
AL
1845 if (qemu_log_enabled()) {
1846 qemu_log("qemu: fatal: ");
1847 qemu_log_vprintf(fmt, ap2);
1848 qemu_log("\n");
f9373291 1849#ifdef TARGET_I386
93fcfe39 1850 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1851#else
93fcfe39 1852 log_cpu_state(env, 0);
f9373291 1853#endif
31b1a7b4 1854 qemu_log_flush();
93fcfe39 1855 qemu_log_close();
924edcae 1856 }
493ae1f0 1857 va_end(ap2);
f9373291 1858 va_end(ap);
fd052bf6
RV
1859#if defined(CONFIG_USER_ONLY)
1860 {
1861 struct sigaction act;
1862 sigfillset(&act.sa_mask);
1863 act.sa_handler = SIG_DFL;
1864 sigaction(SIGABRT, &act, NULL);
1865 }
1866#endif
7501267e
FB
1867 abort();
1868}
1869
c5be9f08
TS
1870CPUState *cpu_copy(CPUState *env)
1871{
01ba9816 1872 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1873 CPUState *next_cpu = new_env->next_cpu;
1874 int cpu_index = new_env->cpu_index;
5a38f081
AL
1875#if defined(TARGET_HAS_ICE)
1876 CPUBreakpoint *bp;
1877 CPUWatchpoint *wp;
1878#endif
1879
c5be9f08 1880 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1881
1882 /* Preserve chaining and index. */
c5be9f08
TS
1883 new_env->next_cpu = next_cpu;
1884 new_env->cpu_index = cpu_index;
5a38f081
AL
1885
1886 /* Clone all break/watchpoints.
1887 Note: Once we support ptrace with hw-debug register access, make sure
1888 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1889 QTAILQ_INIT(&env->breakpoints);
1890 QTAILQ_INIT(&env->watchpoints);
5a38f081 1891#if defined(TARGET_HAS_ICE)
72cf2d4f 1892 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1893 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1894 }
72cf2d4f 1895 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1896 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1897 wp->flags, NULL);
1898 }
1899#endif
1900
c5be9f08
TS
1901 return new_env;
1902}
1903
0124311e
FB
1904#if !defined(CONFIG_USER_ONLY)
1905
5c751e99
EI
1906static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1907{
1908 unsigned int i;
1909
1910 /* Discard jump cache entries for any tb which might potentially
1911 overlap the flushed page. */
1912 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1913 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1914 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1915
1916 i = tb_jmp_cache_hash_page(addr);
1917 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1918 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1919}
1920
08738984
IK
1921static CPUTLBEntry s_cputlb_empty_entry = {
1922 .addr_read = -1,
1923 .addr_write = -1,
1924 .addr_code = -1,
1925 .addend = -1,
1926};
1927
ee8b7021
FB
1928/* NOTE: if flush_global is true, also flush global entries (not
1929 implemented yet) */
1930void tlb_flush(CPUState *env, int flush_global)
33417e70 1931{
33417e70 1932 int i;
0124311e 1933
9fa3e853
FB
1934#if defined(DEBUG_TLB)
1935 printf("tlb_flush:\n");
1936#endif
0124311e
FB
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
33417e70 1941 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1942 int mmu_idx;
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1944 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1945 }
33417e70 1946 }
9fa3e853 1947
8a40a180 1948 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1949
d4c430a8
PB
1950 env->tlb_flush_addr = -1;
1951 env->tlb_flush_mask = 0;
e3db7226 1952 tlb_flush_count++;
33417e70
FB
1953}
1954
274da6b2 1955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1956{
5fafdf24 1957 if (addr == (tlb_entry->addr_read &
84b7b8e7 1958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1959 addr == (tlb_entry->addr_write &
84b7b8e7 1960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1961 addr == (tlb_entry->addr_code &
84b7b8e7 1962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1963 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1964 }
61382a50
FB
1965}
1966
2e12669a 1967void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1968{
8a40a180 1969 int i;
cfde4bd9 1970 int mmu_idx;
0124311e 1971
9fa3e853 1972#if defined(DEBUG_TLB)
108c49b8 1973 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1974#endif
d4c430a8
PB
1975 /* Check if we need to flush due to large pages. */
1976 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977#if defined(DEBUG_TLB)
1978 printf("tlb_flush_page: forced full flush ("
1979 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980 env->tlb_flush_addr, env->tlb_flush_mask);
1981#endif
1982 tlb_flush(env, 1);
1983 return;
1984 }
0124311e
FB
1985 /* must reset current TB so that interrupts cannot modify the
1986 links while we are modifying them */
1987 env->current_tb = NULL;
61382a50
FB
1988
1989 addr &= TARGET_PAGE_MASK;
1990 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1991 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1993
5c751e99 1994 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1995}
1996
9fa3e853
FB
1997/* update the TLBs so that writes to code in the virtual page 'addr'
1998 can be detected */
c227f099 1999static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2000{
5fafdf24 2001 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2002 ram_addr + TARGET_PAGE_SIZE,
2003 CODE_DIRTY_FLAG);
9fa3e853
FB
2004}
2005
9fa3e853 2006/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2007 tested for self modifying code */
c227f099 2008static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2009 target_ulong vaddr)
9fa3e853 2010{
f7c11b53 2011 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2012}
2013
5fafdf24 2014static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2015 unsigned long start, unsigned long length)
2016{
2017 unsigned long addr;
84b7b8e7
FB
2018 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2019 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2020 if ((addr - start) < length) {
0f459d16 2021 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2022 }
2023 }
2024}
2025
5579c7f3 2026/* Note: start and end must be within the same ram block. */
c227f099 2027void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2028 int dirty_flags)
1ccde1cb
FB
2029{
2030 CPUState *env;
4f2ac237 2031 unsigned long length, start1;
f7c11b53 2032 int i;
1ccde1cb
FB
2033
2034 start &= TARGET_PAGE_MASK;
2035 end = TARGET_PAGE_ALIGN(end);
2036
2037 length = end - start;
2038 if (length == 0)
2039 return;
f7c11b53 2040 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2041
1ccde1cb
FB
2042 /* we modify the TLB cache so that the dirty bit will be set again
2043 when accessing the range */
b2e0a138 2044 start1 = (unsigned long)qemu_safe_ram_ptr(start);
5579c7f3
PB
 2045 /* Check that we don't span multiple blocks - this breaks the
2046 address comparisons below. */
b2e0a138 2047 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2048 != (end - 1) - start) {
2049 abort();
2050 }
2051
6a00d601 2052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2053 int mmu_idx;
2054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 for(i = 0; i < CPU_TLB_SIZE; i++)
2056 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 start1, length);
2058 }
6a00d601 2059 }
1ccde1cb
FB
2060}
2061
74576198
AL
2062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
f6f3fbca 2064 int ret = 0;
74576198 2065 in_migration = enable;
f6f3fbca
MT
2066 ret = cpu_notify_migration_log(!!enable);
2067 return ret;
74576198
AL
2068}
2069
2070int cpu_physical_memory_get_dirty_tracking(void)
2071{
2072 return in_migration;
2073}
2074
c227f099
AL
2075int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2076 target_phys_addr_t end_addr)
2bec46dc 2077{
7b8f3b78 2078 int ret;
151f7749 2079
f6f3fbca 2080 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2081 return ret;
2bec46dc
AL
2082}
2083
e5896b12
AP
2084int cpu_physical_log_start(target_phys_addr_t start_addr,
2085 ram_addr_t size)
2086{
2087 CPUPhysMemoryClient *client;
2088 QLIST_FOREACH(client, &memory_client_list, list) {
2089 if (client->log_start) {
2090 int r = client->log_start(client, start_addr, size);
2091 if (r < 0) {
2092 return r;
2093 }
2094 }
2095 }
2096 return 0;
2097}
2098
2099int cpu_physical_log_stop(target_phys_addr_t start_addr,
2100 ram_addr_t size)
2101{
2102 CPUPhysMemoryClient *client;
2103 QLIST_FOREACH(client, &memory_client_list, list) {
2104 if (client->log_stop) {
2105 int r = client->log_stop(client, start_addr, size);
2106 if (r < 0) {
2107 return r;
2108 }
2109 }
2110 }
2111 return 0;
2112}
2113
3a7d929e
FB
2114static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2115{
c227f099 2116 ram_addr_t ram_addr;
5579c7f3 2117 void *p;
3a7d929e 2118
84b7b8e7 2119 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2120 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2121 + tlb_entry->addend);
e890261f 2122 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2123 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2124 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2125 }
2126 }
2127}
2128
2129/* update the TLB according to the current state of the dirty bits */
2130void cpu_tlb_update_dirty(CPUState *env)
2131{
2132 int i;
cfde4bd9
IY
2133 int mmu_idx;
2134 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2135 for(i = 0; i < CPU_TLB_SIZE; i++)
2136 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2137 }
3a7d929e
FB
2138}
2139
0f459d16 2140static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2141{
0f459d16
PB
2142 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2143 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2144}
2145
0f459d16
PB
2146/* update the TLB corresponding to virtual page vaddr
2147 so that it is no longer dirty */
2148static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2149{
1ccde1cb 2150 int i;
cfde4bd9 2151 int mmu_idx;
1ccde1cb 2152
0f459d16 2153 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2154 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2155 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2156 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2157}
2158
d4c430a8
PB
2159/* Our TLB does not support large pages, so remember the area covered by
2160 large pages and trigger a full TLB flush if these are invalidated. */
2161static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2162 target_ulong size)
2163{
2164 target_ulong mask = ~(size - 1);
2165
2166 if (env->tlb_flush_addr == (target_ulong)-1) {
2167 env->tlb_flush_addr = vaddr & mask;
2168 env->tlb_flush_mask = mask;
2169 return;
2170 }
2171 /* Extend the existing region to include the new page.
2172 This is a compromise between unnecessary flushes and the cost
2173 of maintaining a full variable size TLB. */
2174 mask &= env->tlb_flush_mask;
2175 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2176 mask <<= 1;
2177 }
2178 env->tlb_flush_addr &= mask;
2179 env->tlb_flush_mask = mask;
2180}
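/* Worked example (illustrative): with an earlier 2 MB large page at
   0x40000000 (flush_addr = 0x40000000, flush_mask = 0xffe00000) and a new
   2 MB page at 0x40600000, the loop widens the mask twice,
       0xffe00000 -> 0xffc00000 -> 0xff800000,
   leaving flush_addr = 0x40000000 and flush_mask = 0xff800000, i.e. a single
   8 MB region covering both pages.  Any later tlb_flush_page() inside that
   region then falls back to a full tlb_flush(). */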
2181
2182/* Add a new TLB entry. At most one entry for a given virtual address
 2183 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2184 supplied size is only used by tlb_flush_page. */
2185void tlb_set_page(CPUState *env, target_ulong vaddr,
2186 target_phys_addr_t paddr, int prot,
2187 int mmu_idx, target_ulong size)
9fa3e853 2188{
92e873b9 2189 PhysPageDesc *p;
4f2ac237 2190 unsigned long pd;
9fa3e853 2191 unsigned int index;
4f2ac237 2192 target_ulong address;
0f459d16 2193 target_ulong code_address;
355b1943 2194 unsigned long addend;
84b7b8e7 2195 CPUTLBEntry *te;
a1d1bb31 2196 CPUWatchpoint *wp;
c227f099 2197 target_phys_addr_t iotlb;
9fa3e853 2198
d4c430a8
PB
2199 assert(size >= TARGET_PAGE_SIZE);
2200 if (size != TARGET_PAGE_SIZE) {
2201 tlb_add_large_page(env, vaddr, size);
2202 }
92e873b9 2203 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2204 if (!p) {
2205 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2206 } else {
2207 pd = p->phys_offset;
9fa3e853
FB
2208 }
2209#if defined(DEBUG_TLB)
7fd3f494
SW
2210 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2211 " prot=%x idx=%d pd=0x%08lx\n",
2212 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2213#endif
2214
0f459d16
PB
2215 address = vaddr;
2216 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2217 /* IO memory case (romd handled later) */
2218 address |= TLB_MMIO;
2219 }
5579c7f3 2220 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2221 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2222 /* Normal RAM. */
2223 iotlb = pd & TARGET_PAGE_MASK;
2224 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2225 iotlb |= IO_MEM_NOTDIRTY;
2226 else
2227 iotlb |= IO_MEM_ROM;
2228 } else {
ccbb4d44 2229 /* IO handlers are currently passed a physical address.
0f459d16
PB
2230 It would be nice to pass an offset from the base address
2231 of that region. This would avoid having to special case RAM,
2232 and avoid full address decoding in every device.
2233 We can't use the high bits of pd for this because
2234 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2235 iotlb = (pd & ~TARGET_PAGE_MASK);
2236 if (p) {
8da3ff18
PB
2237 iotlb += p->region_offset;
2238 } else {
2239 iotlb += paddr;
2240 }
0f459d16
PB
2241 }
2242
2243 code_address = address;
2244 /* Make accesses to pages with watchpoints go via the
2245 watchpoint trap routines. */
72cf2d4f 2246 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2247 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2248 /* Avoid trapping reads of pages with a write breakpoint. */
2249 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2250 iotlb = io_mem_watch + paddr;
2251 address |= TLB_MMIO;
2252 break;
2253 }
6658ffb8 2254 }
0f459d16 2255 }
d79acba4 2256
0f459d16
PB
2257 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2258 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2259 te = &env->tlb_table[mmu_idx][index];
2260 te->addend = addend - vaddr;
2261 if (prot & PAGE_READ) {
2262 te->addr_read = address;
2263 } else {
2264 te->addr_read = -1;
2265 }
5c751e99 2266
0f459d16
PB
2267 if (prot & PAGE_EXEC) {
2268 te->addr_code = code_address;
2269 } else {
2270 te->addr_code = -1;
2271 }
2272 if (prot & PAGE_WRITE) {
2273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2274 (pd & IO_MEM_ROMD)) {
2275 /* Write access calls the I/O callback. */
2276 te->addr_write = address | TLB_MMIO;
2277 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2278 !cpu_physical_memory_is_dirty(pd)) {
2279 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2280 } else {
0f459d16 2281 te->addr_write = address;
9fa3e853 2282 }
0f459d16
PB
2283 } else {
2284 te->addr_write = -1;
9fa3e853 2285 }
9fa3e853
FB
2286}
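/* Illustrative note (not in the original source): for each access the softmmu
   fast path recomputes the same index used above,
       index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1),
   compares vaddr against te->addr_read/addr_write/addr_code, and on a hit
   forms the host address as vaddr + te->addend.  Entries tagged TLB_MMIO or
   TLB_NOTDIRTY fail that comparison and take the slow path through
   env->iotlb[mmu_idx][index] instead. */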
2287
0124311e
FB
2288#else
2289
ee8b7021 2290void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2291{
2292}
2293
2e12669a 2294void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2295{
2296}
2297
edf8e2af
MW
2298/*
2299 * Walks guest process memory "regions" one by one
2300 * and calls callback function 'fn' for each region.
2301 */
5cd2c5b6
RH
2302
2303struct walk_memory_regions_data
2304{
2305 walk_memory_regions_fn fn;
2306 void *priv;
2307 unsigned long start;
2308 int prot;
2309};
2310
2311static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2312 abi_ulong end, int new_prot)
5cd2c5b6
RH
2313{
2314 if (data->start != -1ul) {
2315 int rc = data->fn(data->priv, data->start, end, data->prot);
2316 if (rc != 0) {
2317 return rc;
2318 }
2319 }
2320
2321 data->start = (new_prot ? end : -1ul);
2322 data->prot = new_prot;
2323
2324 return 0;
2325}
2326
2327static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2328 abi_ulong base, int level, void **lp)
5cd2c5b6 2329{
b480d9b7 2330 abi_ulong pa;
5cd2c5b6
RH
2331 int i, rc;
2332
2333 if (*lp == NULL) {
2334 return walk_memory_regions_end(data, base, 0);
2335 }
2336
2337 if (level == 0) {
2338 PageDesc *pd = *lp;
7296abac 2339 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2340 int prot = pd[i].flags;
2341
2342 pa = base | (i << TARGET_PAGE_BITS);
2343 if (prot != data->prot) {
2344 rc = walk_memory_regions_end(data, pa, prot);
2345 if (rc != 0) {
2346 return rc;
9fa3e853 2347 }
9fa3e853 2348 }
5cd2c5b6
RH
2349 }
2350 } else {
2351 void **pp = *lp;
7296abac 2352 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2353 pa = base | ((abi_ulong)i <<
2354 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2355 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2356 if (rc != 0) {
2357 return rc;
2358 }
2359 }
2360 }
2361
2362 return 0;
2363}
2364
2365int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2366{
2367 struct walk_memory_regions_data data;
2368 unsigned long i;
2369
2370 data.fn = fn;
2371 data.priv = priv;
2372 data.start = -1ul;
2373 data.prot = 0;
2374
2375 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2376 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2377 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2378 if (rc != 0) {
2379 return rc;
9fa3e853 2380 }
33417e70 2381 }
5cd2c5b6
RH
2382
2383 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2384}
2385
b480d9b7
PB
2386static int dump_region(void *priv, abi_ulong start,
2387 abi_ulong end, unsigned long prot)
edf8e2af
MW
2388{
2389 FILE *f = (FILE *)priv;
2390
b480d9b7
PB
2391 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2392 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2393 start, end, end - start,
2394 ((prot & PAGE_READ) ? 'r' : '-'),
2395 ((prot & PAGE_WRITE) ? 'w' : '-'),
2396 ((prot & PAGE_EXEC) ? 'x' : '-'));
2397
2398 return (0);
2399}
2400
2401/* dump memory mappings */
2402void page_dump(FILE *f)
2403{
2404 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2405 "start", "end", "size", "prot");
2406 walk_memory_regions(f, dump_region);
33417e70
FB
2407}
2408
53a5960a 2409int page_get_flags(target_ulong address)
33417e70 2410{
9fa3e853
FB
2411 PageDesc *p;
2412
2413 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2414 if (!p)
9fa3e853
FB
2415 return 0;
2416 return p->flags;
2417}
2418
376a7909
RH
2419/* Modify the flags of a page and invalidate the code if necessary.
2420 The flag PAGE_WRITE_ORG is positioned automatically depending
2421 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2422void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2423{
376a7909
RH
2424 target_ulong addr, len;
2425
2426 /* This function should never be called with addresses outside the
2427 guest address space. If this assert fires, it probably indicates
2428 a missing call to h2g_valid. */
b480d9b7
PB
2429#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2430 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2431#endif
2432 assert(start < end);
9fa3e853
FB
2433
2434 start = start & TARGET_PAGE_MASK;
2435 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2436
2437 if (flags & PAGE_WRITE) {
9fa3e853 2438 flags |= PAGE_WRITE_ORG;
376a7909
RH
2439 }
2440
2441 for (addr = start, len = end - start;
2442 len != 0;
2443 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2444 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2445
2446 /* If the write protection bit is set, then we invalidate
2447 the code inside. */
5fafdf24 2448 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2449 (flags & PAGE_WRITE) &&
2450 p->first_tb) {
d720b93d 2451 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2452 }
2453 p->flags = flags;
2454 }
33417e70
FB
2455}
2456
3d97b40b
TS
2457int page_check_range(target_ulong start, target_ulong len, int flags)
2458{
2459 PageDesc *p;
2460 target_ulong end;
2461 target_ulong addr;
2462
376a7909
RH
2463 /* This function should never be called with addresses outside the
2464 guest address space. If this assert fires, it probably indicates
2465 a missing call to h2g_valid. */
338e9e6c
BS
2466#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2467 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2468#endif
2469
3e0650a9
RH
2470 if (len == 0) {
2471 return 0;
2472 }
376a7909
RH
2473 if (start + len - 1 < start) {
2474 /* We've wrapped around. */
55f280c9 2475 return -1;
376a7909 2476 }
55f280c9 2477
3d97b40b
TS
 2478 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2479 start = start & TARGET_PAGE_MASK;
2480
376a7909
RH
2481 for (addr = start, len = end - start;
2482 len != 0;
2483 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2484 p = page_find(addr >> TARGET_PAGE_BITS);
2485 if( !p )
2486 return -1;
2487 if( !(p->flags & PAGE_VALID) )
2488 return -1;
2489
dae3270c 2490 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2491 return -1;
dae3270c
FB
2492 if (flags & PAGE_WRITE) {
2493 if (!(p->flags & PAGE_WRITE_ORG))
2494 return -1;
2495 /* unprotect the page if it was put read-only because it
2496 contains translated code */
2497 if (!(p->flags & PAGE_WRITE)) {
2498 if (!page_unprotect(addr, 0, NULL))
2499 return -1;
2500 }
2501 return 0;
2502 }
3d97b40b
TS
2503 }
2504 return 0;
2505}
2506
9fa3e853 2507/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2508 page. Return TRUE if the fault was successfully handled. */
53a5960a 2509int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2510{
45d679d6
AJ
2511 unsigned int prot;
2512 PageDesc *p;
53a5960a 2513 target_ulong host_start, host_end, addr;
9fa3e853 2514
c8a706fe
PB
2515 /* Technically this isn't safe inside a signal handler. However we
2516 know this only ever happens in a synchronous SEGV handler, so in
2517 practice it seems to be ok. */
2518 mmap_lock();
2519
45d679d6
AJ
2520 p = page_find(address >> TARGET_PAGE_BITS);
2521 if (!p) {
c8a706fe 2522 mmap_unlock();
9fa3e853 2523 return 0;
c8a706fe 2524 }
45d679d6 2525
9fa3e853
FB
2526 /* if the page was really writable, then we change its
2527 protection back to writable */
45d679d6
AJ
2528 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2529 host_start = address & qemu_host_page_mask;
2530 host_end = host_start + qemu_host_page_size;
2531
2532 prot = 0;
2533 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2534 p = page_find(addr >> TARGET_PAGE_BITS);
2535 p->flags |= PAGE_WRITE;
2536 prot |= p->flags;
2537
9fa3e853
FB
2538 /* and since the content will be modified, we must invalidate
2539 the corresponding translated code. */
45d679d6 2540 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2541#ifdef DEBUG_TB_CHECK
45d679d6 2542 tb_invalidate_check(addr);
9fa3e853 2543#endif
9fa3e853 2544 }
45d679d6
AJ
2545 mprotect((void *)g2h(host_start), qemu_host_page_size,
2546 prot & PAGE_BITS);
2547
2548 mmap_unlock();
2549 return 1;
9fa3e853 2550 }
c8a706fe 2551 mmap_unlock();
9fa3e853
FB
2552 return 0;
2553}
2554
6a00d601
FB
2555static inline void tlb_set_dirty(CPUState *env,
2556 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2557{
2558}
9fa3e853
FB
2559#endif /* defined(CONFIG_USER_ONLY) */
2560
e2eef170 2561#if !defined(CONFIG_USER_ONLY)
8da3ff18 2562
c04b2b78
PB
2563#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2564typedef struct subpage_t {
2565 target_phys_addr_t base;
f6405247
RH
2566 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2567 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2568} subpage_t;
2569
c227f099
AL
2570static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2571 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2572static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2573 ram_addr_t orig_memory,
2574 ram_addr_t region_offset);
db7b5426
BS
2575#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2576 need_subpage) \
2577 do { \
2578 if (addr > start_addr) \
2579 start_addr2 = 0; \
2580 else { \
2581 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2582 if (start_addr2 > 0) \
2583 need_subpage = 1; \
2584 } \
2585 \
49e9fba2 2586 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2587 end_addr2 = TARGET_PAGE_SIZE - 1; \
2588 else { \
2589 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2590 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2591 need_subpage = 1; \
2592 } \
2593 } while (0)
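/* Worked example (illustrative, assuming a 4 KiB TARGET_PAGE_SIZE):
   registering a region with start_addr = 0x10000200 and orig_size = 0x400.
   For the page at addr = 0x10000000 the macro yields start_addr2 = 0x200,
   end_addr2 = 0x5ff and need_subpage = 1, so only the 0x200..0x5ff window of
   that page is routed to the new handlers; the rest of the page keeps its
   previous mapping (or stays IO_MEM_UNASSIGNED) via the subpage code below. */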
2594
8f2498f9
MT
2595/* register physical memory.
2596 For RAM, 'size' must be a multiple of the target page size.
2597 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2598 io memory page. The address used when calling the IO function is
2599 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2600 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2601 before calculating this offset. This should not be a problem unless
2602 the low bits of start_addr and region_offset differ. */
c227f099
AL
2603void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2604 ram_addr_t size,
2605 ram_addr_t phys_offset,
2606 ram_addr_t region_offset)
33417e70 2607{
c227f099 2608 target_phys_addr_t addr, end_addr;
92e873b9 2609 PhysPageDesc *p;
9d42037b 2610 CPUState *env;
c227f099 2611 ram_addr_t orig_size = size;
f6405247 2612 subpage_t *subpage;
33417e70 2613
f6f3fbca
MT
2614 cpu_notify_set_memory(start_addr, size, phys_offset);
2615
67c4d23c
PB
2616 if (phys_offset == IO_MEM_UNASSIGNED) {
2617 region_offset = start_addr;
2618 }
8da3ff18 2619 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2620 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2621 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2622 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2623 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2624 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2625 ram_addr_t orig_memory = p->phys_offset;
2626 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2627 int need_subpage = 0;
2628
2629 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2630 need_subpage);
f6405247 2631 if (need_subpage) {
db7b5426
BS
2632 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2633 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2634 &p->phys_offset, orig_memory,
2635 p->region_offset);
db7b5426
BS
2636 } else {
2637 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2638 >> IO_MEM_SHIFT];
2639 }
8da3ff18
PB
2640 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2641 region_offset);
2642 p->region_offset = 0;
db7b5426
BS
2643 } else {
2644 p->phys_offset = phys_offset;
2645 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2646 (phys_offset & IO_MEM_ROMD))
2647 phys_offset += TARGET_PAGE_SIZE;
2648 }
2649 } else {
2650 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2651 p->phys_offset = phys_offset;
8da3ff18 2652 p->region_offset = region_offset;
db7b5426 2653 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2654 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2655 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2656 } else {
c227f099 2657 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2658 int need_subpage = 0;
2659
2660 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2661 end_addr2, need_subpage);
2662
f6405247 2663 if (need_subpage) {
db7b5426 2664 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2665 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2666 addr & TARGET_PAGE_MASK);
db7b5426 2667 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2668 phys_offset, region_offset);
2669 p->region_offset = 0;
db7b5426
BS
2670 }
2671 }
2672 }
8da3ff18 2673 region_offset += TARGET_PAGE_SIZE;
33417e70 2674 }
3b46e624 2675
9d42037b
FB
2676 /* since each CPU stores ram addresses in its TLB cache, we must
2677 reset the modified entries */
2678 /* XXX: slow ! */
2679 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2680 tlb_flush(env, 1);
2681 }
33417e70
FB
2682}
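/* Usage sketch (illustrative): a board mapping 8 MB of previously allocated
   RAM at guest physical address 0x80000000 with no extra region offset.
   Boards typically go through the cpu_register_physical_memory() wrapper,
   which passes a region_offset of 0; the address and size here are made up. */
static void example_map_ram(ram_addr_t ram_offset)
{
    cpu_register_physical_memory_offset(0x80000000, 8 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);
}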
2683
ba863458 2684/* XXX: temporary until new memory mapping API */
c227f099 2685ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2686{
2687 PhysPageDesc *p;
2688
2689 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2690 if (!p)
2691 return IO_MEM_UNASSIGNED;
2692 return p->phys_offset;
2693}
2694
c227f099 2695void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2696{
2697 if (kvm_enabled())
2698 kvm_coalesce_mmio_region(addr, size);
2699}
2700
c227f099 2701void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2702{
2703 if (kvm_enabled())
2704 kvm_uncoalesce_mmio_region(addr, size);
2705}
2706
62a2744c
SY
2707void qemu_flush_coalesced_mmio_buffer(void)
2708{
2709 if (kvm_enabled())
2710 kvm_flush_coalesced_mmio_buffer();
2711}
2712
c902760f
MT
2713#if defined(__linux__) && !defined(TARGET_S390X)
2714
2715#include <sys/vfs.h>
2716
2717#define HUGETLBFS_MAGIC 0x958458f6
2718
2719static long gethugepagesize(const char *path)
2720{
2721 struct statfs fs;
2722 int ret;
2723
2724 do {
9742bf26 2725 ret = statfs(path, &fs);
c902760f
MT
2726 } while (ret != 0 && errno == EINTR);
2727
2728 if (ret != 0) {
9742bf26
YT
2729 perror(path);
2730 return 0;
c902760f
MT
2731 }
2732
2733 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2734 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2735
2736 return fs.f_bsize;
2737}
2738
04b16653
AW
2739static void *file_ram_alloc(RAMBlock *block,
2740 ram_addr_t memory,
2741 const char *path)
c902760f
MT
2742{
2743 char *filename;
2744 void *area;
2745 int fd;
2746#ifdef MAP_POPULATE
2747 int flags;
2748#endif
2749 unsigned long hpagesize;
2750
2751 hpagesize = gethugepagesize(path);
2752 if (!hpagesize) {
9742bf26 2753 return NULL;
c902760f
MT
2754 }
2755
2756 if (memory < hpagesize) {
2757 return NULL;
2758 }
2759
2760 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2761 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2762 return NULL;
2763 }
2764
2765 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2766 return NULL;
c902760f
MT
2767 }
2768
2769 fd = mkstemp(filename);
2770 if (fd < 0) {
9742bf26
YT
2771 perror("unable to create backing store for hugepages");
2772 free(filename);
2773 return NULL;
c902760f
MT
2774 }
2775 unlink(filename);
2776 free(filename);
2777
2778 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2779
2780 /*
2781 * ftruncate is not supported by hugetlbfs in older
2782 * hosts, so don't bother bailing out on errors.
2783 * If anything goes wrong with it under other filesystems,
2784 * mmap will fail.
2785 */
2786 if (ftruncate(fd, memory))
9742bf26 2787 perror("ftruncate");
c902760f
MT
2788
2789#ifdef MAP_POPULATE
2790 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2791 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2792 * to sidestep this quirk.
2793 */
2794 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2795 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2796#else
2797 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2798#endif
2799 if (area == MAP_FAILED) {
9742bf26
YT
2800 perror("file_ram_alloc: can't mmap RAM pages");
2801 close(fd);
2802 return (NULL);
c902760f 2803 }
04b16653 2804 block->fd = fd;
c902760f
MT
2805 return area;
2806}
2807#endif
2808
d17b5288 2809static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2810{
2811 RAMBlock *block, *next_block;
09d7ae90 2812 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2813
2814 if (QLIST_EMPTY(&ram_list.blocks))
2815 return 0;
2816
2817 QLIST_FOREACH(block, &ram_list.blocks, next) {
2818 ram_addr_t end, next = ULONG_MAX;
2819
2820 end = block->offset + block->length;
2821
2822 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2823 if (next_block->offset >= end) {
2824 next = MIN(next, next_block->offset);
2825 }
2826 }
2827 if (next - end >= size && next - end < mingap) {
2828 offset = end;
2829 mingap = next - end;
2830 }
2831 }
2832 return offset;
2833}
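/* Worked example (illustrative): with existing blocks covering [0, 0x8000000)
   and [0x10000000, 0x18000000), a request for 0x4000000 bytes picks the
   smallest gap that fits, so the new block is placed at offset 0x8000000. */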
2834
2835static ram_addr_t last_ram_offset(void)
d17b5288
AW
2836{
2837 RAMBlock *block;
2838 ram_addr_t last = 0;
2839
2840 QLIST_FOREACH(block, &ram_list.blocks, next)
2841 last = MAX(last, block->offset + block->length);
2842
2843 return last;
2844}
2845
84b89d78 2846ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2847 ram_addr_t size, void *host)
84b89d78
CM
2848{
2849 RAMBlock *new_block, *block;
2850
2851 size = TARGET_PAGE_ALIGN(size);
2852 new_block = qemu_mallocz(sizeof(*new_block));
2853
2854 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2855 char *id = dev->parent_bus->info->get_dev_path(dev);
2856 if (id) {
2857 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2858 qemu_free(id);
2859 }
2860 }
2861 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2862
2863 QLIST_FOREACH(block, &ram_list.blocks, next) {
2864 if (!strcmp(block->idstr, new_block->idstr)) {
2865 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2866 new_block->idstr);
2867 abort();
2868 }
2869 }
2870
6977dfe6
YT
2871 if (host) {
2872 new_block->host = host;
cd19cfa2 2873 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2874 } else {
2875 if (mem_path) {
c902760f 2876#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2877 new_block->host = file_ram_alloc(new_block, size, mem_path);
2878 if (!new_block->host) {
2879 new_block->host = qemu_vmalloc(size);
e78815a5 2880 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2881 }
c902760f 2882#else
6977dfe6
YT
2883 fprintf(stderr, "-mem-path option unsupported\n");
2884 exit(1);
c902760f 2885#endif
6977dfe6 2886 } else {
6b02494d 2887#if defined(TARGET_S390X) && defined(CONFIG_KVM)
6977dfe6
YT
2888 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2889 new_block->host = mmap((void*)0x1000000, size,
2890 PROT_EXEC|PROT_READ|PROT_WRITE,
2891 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2892#else
6977dfe6 2893 new_block->host = qemu_vmalloc(size);
6b02494d 2894#endif
e78815a5 2895 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2896 }
c902760f 2897 }
6977dfe6 2898
d17b5288 2899 new_block->offset = find_ram_offset(size);
94a6b54f
PB
2900 new_block->length = size;
2901
f471a17e 2902 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2903
f471a17e 2904 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2905 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2906 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2907 0xff, size >> TARGET_PAGE_BITS);
2908
6f0437e8
JK
2909 if (kvm_enabled())
2910 kvm_setup_guest_memory(new_block->host, size);
2911
94a6b54f
PB
2912 return new_block->offset;
2913}
e9a1ab19 2914
6977dfe6
YT
2915ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2916{
2917 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2918}
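/* Usage sketch (illustrative): a display device allocating 8 MB of video RAM
   and obtaining a host pointer to it.  The "example.vram" name is made up;
   real devices pick an idstr that is stable across migration. */
static void *example_alloc_vram(DeviceState *dev, ram_addr_t *out_offset)
{
    ram_addr_t offset = qemu_ram_alloc(dev, "example.vram", 8 * 1024 * 1024);

    *out_offset = offset;
    return qemu_get_ram_ptr(offset);
}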
2919
c227f099 2920void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2921{
04b16653
AW
2922 RAMBlock *block;
2923
2924 QLIST_FOREACH(block, &ram_list.blocks, next) {
2925 if (addr == block->offset) {
2926 QLIST_REMOVE(block, next);
cd19cfa2
HY
2927 if (block->flags & RAM_PREALLOC_MASK) {
2928 ;
2929 } else if (mem_path) {
04b16653
AW
2930#if defined (__linux__) && !defined(TARGET_S390X)
2931 if (block->fd) {
2932 munmap(block->host, block->length);
2933 close(block->fd);
2934 } else {
2935 qemu_vfree(block->host);
2936 }
fd28aa13
JK
2937#else
2938 abort();
04b16653
AW
2939#endif
2940 } else {
2941#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2942 munmap(block->host, block->length);
2943#else
2944 qemu_vfree(block->host);
2945#endif
2946 }
2947 qemu_free(block);
2948 return;
2949 }
2950 }
2951
e9a1ab19
FB
2952}
2953
cd19cfa2
HY
2954#ifndef _WIN32
2955void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2956{
2957 RAMBlock *block;
2958 ram_addr_t offset;
2959 int flags;
2960 void *area, *vaddr;
2961
2962 QLIST_FOREACH(block, &ram_list.blocks, next) {
2963 offset = addr - block->offset;
2964 if (offset < block->length) {
2965 vaddr = block->host + offset;
2966 if (block->flags & RAM_PREALLOC_MASK) {
2967 ;
2968 } else {
2969 flags = MAP_FIXED;
2970 munmap(vaddr, length);
2971 if (mem_path) {
2972#if defined(__linux__) && !defined(TARGET_S390X)
2973 if (block->fd) {
2974#ifdef MAP_POPULATE
2975 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2976 MAP_PRIVATE;
2977#else
2978 flags |= MAP_PRIVATE;
2979#endif
2980 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2981 flags, block->fd, offset);
2982 } else {
2983 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2984 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2985 flags, -1, 0);
2986 }
fd28aa13
JK
2987#else
2988 abort();
cd19cfa2
HY
2989#endif
2990 } else {
2991#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2992 flags |= MAP_SHARED | MAP_ANONYMOUS;
2993 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2994 flags, -1, 0);
2995#else
2996 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2997 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2998 flags, -1, 0);
2999#endif
3000 }
3001 if (area != vaddr) {
3002 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3003 length, addr);
3004 exit(1);
3005 }
3006 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3007 }
3008 return;
3009 }
3010 }
3011}
3012#endif /* !_WIN32 */
3013
dc828ca1 3014/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3015 With the exception of the softmmu code in this file, this should
3016 only be used for local memory (e.g. video ram) that the device owns,
3017 and knows it isn't going to access beyond the end of the block.
3018
3019 It should not be used for general purpose DMA.
3020 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3021 */
c227f099 3022void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3023{
94a6b54f
PB
3024 RAMBlock *block;
3025
f471a17e
AW
3026 QLIST_FOREACH(block, &ram_list.blocks, next) {
3027 if (addr - block->offset < block->length) {
7d82af38
VP
 3028 /* Move this entry to the start of the list. */
3029 if (block != QLIST_FIRST(&ram_list.blocks)) {
3030 QLIST_REMOVE(block, next);
3031 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3032 }
f471a17e
AW
3033 return block->host + (addr - block->offset);
3034 }
94a6b54f 3035 }
f471a17e
AW
3036
3037 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3038 abort();
3039
3040 return NULL;
dc828ca1
PB
3041}
3042
b2e0a138
MT
3043/* Return a host pointer to ram allocated with qemu_ram_alloc.
3044 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3045 */
3046void *qemu_safe_ram_ptr(ram_addr_t addr)
3047{
3048 RAMBlock *block;
3049
3050 QLIST_FOREACH(block, &ram_list.blocks, next) {
3051 if (addr - block->offset < block->length) {
3052 return block->host + (addr - block->offset);
3053 }
3054 }
3055
3056 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3057 abort();
3058
3059 return NULL;
3060}
3061
e890261f 3062int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3063{
94a6b54f
PB
3064 RAMBlock *block;
3065 uint8_t *host = ptr;
3066
f471a17e
AW
3067 QLIST_FOREACH(block, &ram_list.blocks, next) {
3068 if (host - block->host < block->length) {
e890261f
MT
3069 *ram_addr = block->offset + (host - block->host);
3070 return 0;
f471a17e 3071 }
94a6b54f 3072 }
e890261f
MT
3073 return -1;
3074}
f471a17e 3075
e890261f
MT
3076/* Some of the softmmu routines need to translate from a host pointer
3077 (typically a TLB entry) back to a ram offset. */
3078ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3079{
3080 ram_addr_t ram_addr;
f471a17e 3081
e890261f
MT
3082 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3083 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3084 abort();
3085 }
3086 return ram_addr;
5579c7f3
PB
3087}
3088
c227f099 3089static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3090{
67d3b957 3091#ifdef DEBUG_UNASSIGNED
ab3d1727 3092 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3093#endif
faed1c2a 3094#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3095 do_unassigned_access(addr, 0, 0, 0, 1);
3096#endif
3097 return 0;
3098}
3099
c227f099 3100static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3101{
3102#ifdef DEBUG_UNASSIGNED
3103 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3104#endif
faed1c2a 3105#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3106 do_unassigned_access(addr, 0, 0, 0, 2);
3107#endif
3108 return 0;
3109}
3110
c227f099 3111static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3112{
3113#ifdef DEBUG_UNASSIGNED
3114 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3115#endif
faed1c2a 3116#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3117 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 3118#endif
33417e70
FB
3119 return 0;
3120}
3121
c227f099 3122static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3123{
67d3b957 3124#ifdef DEBUG_UNASSIGNED
ab3d1727 3125 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3126#endif
faed1c2a 3127#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3128 do_unassigned_access(addr, 1, 0, 0, 1);
3129#endif
3130}
3131
c227f099 3132static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3133{
3134#ifdef DEBUG_UNASSIGNED
3135 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3136#endif
faed1c2a 3137#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3138 do_unassigned_access(addr, 1, 0, 0, 2);
3139#endif
3140}
3141
c227f099 3142static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3143{
3144#ifdef DEBUG_UNASSIGNED
3145 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3146#endif
faed1c2a 3147#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3148 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 3149#endif
33417e70
FB
3150}
3151
d60efc6b 3152static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3153 unassigned_mem_readb,
e18231a3
BS
3154 unassigned_mem_readw,
3155 unassigned_mem_readl,
33417e70
FB
3156};
3157
d60efc6b 3158static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3159 unassigned_mem_writeb,
e18231a3
BS
3160 unassigned_mem_writew,
3161 unassigned_mem_writel,
33417e70
FB
3162};
3163
c227f099 3164static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3165 uint32_t val)
9fa3e853 3166{
3a7d929e 3167 int dirty_flags;
f7c11b53 3168 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3169 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3170#if !defined(CONFIG_USER_ONLY)
3a7d929e 3171 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3172 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3173#endif
3a7d929e 3174 }
5579c7f3 3175 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3176 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3177 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3178 /* we remove the notdirty callback only if the code has been
3179 flushed */
3180 if (dirty_flags == 0xff)
2e70f6ef 3181 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3182}
3183
c227f099 3184static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3185 uint32_t val)
9fa3e853 3186{
3a7d929e 3187 int dirty_flags;
f7c11b53 3188 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3189 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3190#if !defined(CONFIG_USER_ONLY)
3a7d929e 3191 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3192 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3193#endif
3a7d929e 3194 }
5579c7f3 3195 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3196 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3197 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3198 /* we remove the notdirty callback only if the code has been
3199 flushed */
3200 if (dirty_flags == 0xff)
2e70f6ef 3201 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3202}
3203
c227f099 3204static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3205 uint32_t val)
9fa3e853 3206{
3a7d929e 3207 int dirty_flags;
f7c11b53 3208 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3209 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3210#if !defined(CONFIG_USER_ONLY)
3a7d929e 3211 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3212 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3213#endif
3a7d929e 3214 }
5579c7f3 3215 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3216 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3217 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3218 /* we remove the notdirty callback only if the code has been
3219 flushed */
3220 if (dirty_flags == 0xff)
2e70f6ef 3221 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3222}
3223
d60efc6b 3224static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3225 NULL, /* never used */
3226 NULL, /* never used */
3227 NULL, /* never used */
3228};
3229
d60efc6b 3230static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3231 notdirty_mem_writeb,
3232 notdirty_mem_writew,
3233 notdirty_mem_writel,
3234};
3235
0f459d16 3236/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3237static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3238{
3239 CPUState *env = cpu_single_env;
06d55cc1
AL
3240 target_ulong pc, cs_base;
3241 TranslationBlock *tb;
0f459d16 3242 target_ulong vaddr;
a1d1bb31 3243 CPUWatchpoint *wp;
06d55cc1 3244 int cpu_flags;
0f459d16 3245
06d55cc1
AL
3246 if (env->watchpoint_hit) {
3247 /* We re-entered the check after replacing the TB. Now raise
 3248 * the debug interrupt so that it will trigger after the
3249 * current instruction. */
3250 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3251 return;
3252 }
2e70f6ef 3253 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3254 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3255 if ((vaddr == (wp->vaddr & len_mask) ||
3256 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3257 wp->flags |= BP_WATCHPOINT_HIT;
3258 if (!env->watchpoint_hit) {
3259 env->watchpoint_hit = wp;
3260 tb = tb_find_pc(env->mem_io_pc);
3261 if (!tb) {
3262 cpu_abort(env, "check_watchpoint: could not find TB for "
3263 "pc=%p", (void *)env->mem_io_pc);
3264 }
3265 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3266 tb_phys_invalidate(tb, -1);
3267 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3268 env->exception_index = EXCP_DEBUG;
3269 } else {
3270 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3271 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3272 }
3273 cpu_resume_from_signal(env, NULL);
06d55cc1 3274 }
6e140f28
AL
3275 } else {
3276 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3277 }
3278 }
3279}
3280
6658ffb8
PB
3281/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3282 so these check for a hit then pass through to the normal out-of-line
3283 phys routines. */
c227f099 3284static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3285{
b4051334 3286 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3287 return ldub_phys(addr);
3288}
3289
c227f099 3290static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3291{
b4051334 3292 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3293 return lduw_phys(addr);
3294}
3295
c227f099 3296static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3297{
b4051334 3298 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3299 return ldl_phys(addr);
3300}
3301
c227f099 3302static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3303 uint32_t val)
3304{
b4051334 3305 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3306 stb_phys(addr, val);
3307}
3308
c227f099 3309static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3310 uint32_t val)
3311{
b4051334 3312 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3313 stw_phys(addr, val);
3314}
3315
c227f099 3316static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3317 uint32_t val)
3318{
b4051334 3319 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3320 stl_phys(addr, val);
3321}
3322
d60efc6b 3323static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3324 watch_mem_readb,
3325 watch_mem_readw,
3326 watch_mem_readl,
3327};
3328
d60efc6b 3329static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3330 watch_mem_writeb,
3331 watch_mem_writew,
3332 watch_mem_writel,
3333};
6658ffb8 3334
f6405247
RH
3335static inline uint32_t subpage_readlen (subpage_t *mmio,
3336 target_phys_addr_t addr,
3337 unsigned int len)
db7b5426 3338{
f6405247 3339 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3340#if defined(DEBUG_SUBPAGE)
3341 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3342 mmio, len, addr, idx);
3343#endif
db7b5426 3344
f6405247
RH
3345 addr += mmio->region_offset[idx];
3346 idx = mmio->sub_io_index[idx];
3347 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3348}
3349
c227f099 3350static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3351 uint32_t value, unsigned int len)
db7b5426 3352{
f6405247 3353 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3354#if defined(DEBUG_SUBPAGE)
f6405247
RH
3355 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3356 __func__, mmio, len, addr, idx, value);
db7b5426 3357#endif
f6405247
RH
3358
3359 addr += mmio->region_offset[idx];
3360 idx = mmio->sub_io_index[idx];
3361 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3362}
3363
c227f099 3364static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3365{
db7b5426
BS
3366 return subpage_readlen(opaque, addr, 0);
3367}
3368
c227f099 3369static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3370 uint32_t value)
3371{
db7b5426
BS
3372 subpage_writelen(opaque, addr, value, 0);
3373}
3374
c227f099 3375static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3376{
db7b5426
BS
3377 return subpage_readlen(opaque, addr, 1);
3378}
3379
c227f099 3380static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3381 uint32_t value)
3382{
db7b5426
BS
3383 subpage_writelen(opaque, addr, value, 1);
3384}
3385
c227f099 3386static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3387{
db7b5426
BS
3388 return subpage_readlen(opaque, addr, 2);
3389}
3390
f6405247
RH
3391static void subpage_writel (void *opaque, target_phys_addr_t addr,
3392 uint32_t value)
db7b5426 3393{
db7b5426
BS
3394 subpage_writelen(opaque, addr, value, 2);
3395}
3396
d60efc6b 3397static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3398 &subpage_readb,
3399 &subpage_readw,
3400 &subpage_readl,
3401};
3402
d60efc6b 3403static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3404 &subpage_writeb,
3405 &subpage_writew,
3406 &subpage_writel,
3407};
3408
c227f099
AL
3409static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3410 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3411{
3412 int idx, eidx;
3413
3414 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3415 return -1;
3416 idx = SUBPAGE_IDX(start);
3417 eidx = SUBPAGE_IDX(end);
3418#if defined(DEBUG_SUBPAGE)
0bf9e31a 3419 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3420 mmio, start, end, idx, eidx, memory);
3421#endif
95c318f5
GN
3422 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3423 memory = IO_MEM_UNASSIGNED;
f6405247 3424 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3425 for (; idx <= eidx; idx++) {
f6405247
RH
3426 mmio->sub_io_index[idx] = memory;
3427 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3428 }
3429
3430 return 0;
3431}
3432
f6405247
RH
3433static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3434 ram_addr_t orig_memory,
3435 ram_addr_t region_offset)
db7b5426 3436{
c227f099 3437 subpage_t *mmio;
db7b5426
BS
3438 int subpage_memory;
3439
c227f099 3440 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3441
3442 mmio->base = base;
2507c12a
AG
3443 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3444 DEVICE_NATIVE_ENDIAN);
db7b5426 3445#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3446 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3447 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3448#endif
1eec614b 3449 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3450 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3451
3452 return mmio;
3453}
3454
88715657
AL
3455static int get_free_io_mem_idx(void)
3456{
3457 int i;
3458
3459 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3460 if (!io_mem_used[i]) {
3461 io_mem_used[i] = 1;
3462 return i;
3463 }
c6703b47 3464 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3465 return -1;
3466}
3467
dd310534
AG
3468/*
3469 * Usually, devices operate in little endian mode. There are devices out
3470 * there that operate in big endian too. Each device gets byte swapped
3471 * mmio if plugged onto a CPU that does the other endianness.
3472 *
3473 * CPU Device swap?
3474 *
3475 * little little no
3476 * little big yes
3477 * big little yes
3478 * big big no
3479 */
3480
3481typedef struct SwapEndianContainer {
3482 CPUReadMemoryFunc *read[3];
3483 CPUWriteMemoryFunc *write[3];
3484 void *opaque;
3485} SwapEndianContainer;
3486
3487static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3488{
3489 uint32_t val;
3490 SwapEndianContainer *c = opaque;
3491 val = c->read[0](c->opaque, addr);
3492 return val;
3493}
3494
3495static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3496{
3497 uint32_t val;
3498 SwapEndianContainer *c = opaque;
3499 val = bswap16(c->read[1](c->opaque, addr));
3500 return val;
3501}
3502
3503static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3504{
3505 uint32_t val;
3506 SwapEndianContainer *c = opaque;
3507 val = bswap32(c->read[2](c->opaque, addr));
3508 return val;
3509}
3510
3511static CPUReadMemoryFunc * const swapendian_readfn[3]={
3512 swapendian_mem_readb,
3513 swapendian_mem_readw,
3514 swapendian_mem_readl
3515};
3516
3517static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3518 uint32_t val)
3519{
3520 SwapEndianContainer *c = opaque;
3521 c->write[0](c->opaque, addr, val);
3522}
3523
3524static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3525 uint32_t val)
3526{
3527 SwapEndianContainer *c = opaque;
3528 c->write[1](c->opaque, addr, bswap16(val));
3529}
3530
3531static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3532 uint32_t val)
3533{
3534 SwapEndianContainer *c = opaque;
3535 c->write[2](c->opaque, addr, bswap32(val));
3536}
3537
3538static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3539 swapendian_mem_writeb,
3540 swapendian_mem_writew,
3541 swapendian_mem_writel
3542};
3543
3544static void swapendian_init(int io_index)
3545{
3546 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3547 int i;
3548
3549 /* Swap mmio for big endian targets */
3550 c->opaque = io_mem_opaque[io_index];
3551 for (i = 0; i < 3; i++) {
3552 c->read[i] = io_mem_read[io_index][i];
3553 c->write[i] = io_mem_write[io_index][i];
3554
3555 io_mem_read[io_index][i] = swapendian_readfn[i];
3556 io_mem_write[io_index][i] = swapendian_writefn[i];
3557 }
3558 io_mem_opaque[io_index] = c;
3559}
3560
3561static void swapendian_del(int io_index)
3562{
3563 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3564 qemu_free(io_mem_opaque[io_index]);
3565 }
3566}
3567
33417e70
FB
3568/* mem_read and mem_write are arrays of functions containing the
3569 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3570 2). Functions can be omitted with a NULL function pointer.
3ee89922 3571 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3572 modified. If it is zero, a new io zone is allocated. The return
3573 value can be used with cpu_register_physical_memory(). (-1) is
3574 returned on error. */
1eed09cb 3575static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3576 CPUReadMemoryFunc * const *mem_read,
3577 CPUWriteMemoryFunc * const *mem_write,
dd310534 3578 void *opaque, enum device_endian endian)
33417e70 3579{
3cab721d
RH
3580 int i;
3581
33417e70 3582 if (io_index <= 0) {
88715657
AL
3583 io_index = get_free_io_mem_idx();
3584 if (io_index == -1)
3585 return io_index;
33417e70 3586 } else {
1eed09cb 3587 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3588 if (io_index >= IO_MEM_NB_ENTRIES)
3589 return -1;
3590 }
b5ff1b31 3591
3cab721d
RH
3592 for (i = 0; i < 3; ++i) {
3593 io_mem_read[io_index][i]
3594 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3595 }
3596 for (i = 0; i < 3; ++i) {
3597 io_mem_write[io_index][i]
3598 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3599 }
a4193c8a 3600 io_mem_opaque[io_index] = opaque;
f6405247 3601
dd310534
AG
3602 switch (endian) {
3603 case DEVICE_BIG_ENDIAN:
3604#ifndef TARGET_WORDS_BIGENDIAN
3605 swapendian_init(io_index);
3606#endif
3607 break;
3608 case DEVICE_LITTLE_ENDIAN:
3609#ifdef TARGET_WORDS_BIGENDIAN
3610 swapendian_init(io_index);
3611#endif
3612 break;
3613 case DEVICE_NATIVE_ENDIAN:
3614 default:
3615 break;
3616 }
3617
f6405247 3618 return (io_index << IO_MEM_SHIFT);
33417e70 3619}
61382a50 3620
d60efc6b
BS
3621int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3622 CPUWriteMemoryFunc * const *mem_write,
dd310534 3623 void *opaque, enum device_endian endian)
1eed09cb 3624{
2507c12a 3625 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3626}
3627
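
/* A hedged usage sketch (not part of exec.c): roughly how a device model
 * consumes the registration API above.  The mydev_* callbacks, MyDevState
 * and the 0x10000000/0x1000 mapping are hypothetical; the three-argument
 * cpu_register_physical_memory() helper is assumed to come from cpu-common.h.
 */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(MyDevState *s)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                    DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0x10000000, 0x1000, io);
    /* cpu_unregister_io_memory(io) releases the slot again */
}
#endif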
88715657
AL
3628void cpu_unregister_io_memory(int io_table_address)
3629{
3630 int i;
3631 int io_index = io_table_address >> IO_MEM_SHIFT;
3632
dd310534
AG
3633 swapendian_del(io_index);
3634
88715657
AL
3635 for (i=0;i < 3; i++) {
3636 io_mem_read[io_index][i] = unassigned_mem_read[i];
3637 io_mem_write[io_index][i] = unassigned_mem_write[i];
3638 }
3639 io_mem_opaque[io_index] = NULL;
3640 io_mem_used[io_index] = 0;
3641}
3642
e9179ce1
AK
3643static void io_mem_init(void)
3644{
3645 int i;
3646
2507c12a
AG
3647 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3648 unassigned_mem_write, NULL,
3649 DEVICE_NATIVE_ENDIAN);
3650 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3651 unassigned_mem_write, NULL,
3652 DEVICE_NATIVE_ENDIAN);
3653 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3654 notdirty_mem_write, NULL,
3655 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3656 for (i=0; i<5; i++)
3657 io_mem_used[i] = 1;
3658
3659 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3660 watch_mem_write, NULL,
3661 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3662}
3663
e2eef170
PB
3664#endif /* !defined(CONFIG_USER_ONLY) */
3665
13eb76e0
FB
3666/* physical memory access (slow version, mainly for debug) */
3667#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3668int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3669 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3670{
3671 int l, flags;
3672 target_ulong page;
53a5960a 3673 void * p;
13eb76e0
FB
3674
3675 while (len > 0) {
3676 page = addr & TARGET_PAGE_MASK;
3677 l = (page + TARGET_PAGE_SIZE) - addr;
3678 if (l > len)
3679 l = len;
3680 flags = page_get_flags(page);
3681 if (!(flags & PAGE_VALID))
a68fe89c 3682 return -1;
13eb76e0
FB
3683 if (is_write) {
3684 if (!(flags & PAGE_WRITE))
a68fe89c 3685 return -1;
579a97f7 3686 /* XXX: this code should not depend on lock_user */
72fb7daa 3687 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3688 return -1;
72fb7daa
AJ
3689 memcpy(p, buf, l);
3690 unlock_user(p, addr, l);
13eb76e0
FB
3691 } else {
3692 if (!(flags & PAGE_READ))
a68fe89c 3693 return -1;
579a97f7 3694 /* XXX: this code should not depend on lock_user */
72fb7daa 3695 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3696 return -1;
72fb7daa 3697 memcpy(buf, p, l);
5b257578 3698 unlock_user(p, addr, 0);
13eb76e0
FB
3699 }
3700 len -= l;
3701 buf += l;
3702 addr += l;
3703 }
a68fe89c 3704 return 0;
13eb76e0 3705}
8df1cd07 3706
13eb76e0 3707#else
c227f099 3708void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3709 int len, int is_write)
3710{
3711 int l, io_index;
3712 uint8_t *ptr;
3713 uint32_t val;
c227f099 3714 target_phys_addr_t page;
2e12669a 3715 unsigned long pd;
92e873b9 3716 PhysPageDesc *p;
3b46e624 3717
13eb76e0
FB
3718 while (len > 0) {
3719 page = addr & TARGET_PAGE_MASK;
3720 l = (page + TARGET_PAGE_SIZE) - addr;
3721 if (l > len)
3722 l = len;
92e873b9 3723 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3724 if (!p) {
3725 pd = IO_MEM_UNASSIGNED;
3726 } else {
3727 pd = p->phys_offset;
3728 }
3b46e624 3729
13eb76e0 3730 if (is_write) {
3a7d929e 3731 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3732 target_phys_addr_t addr1 = addr;
13eb76e0 3733 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3734 if (p)
6c2934db 3735 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3736 /* XXX: could force cpu_single_env to NULL to avoid
3737 potential bugs */
6c2934db 3738 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3739 /* 32 bit write access */
c27004ec 3740 val = ldl_p(buf);
6c2934db 3741 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3742 l = 4;
6c2934db 3743 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3744 /* 16 bit write access */
c27004ec 3745 val = lduw_p(buf);
6c2934db 3746 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3747 l = 2;
3748 } else {
1c213d19 3749 /* 8 bit write access */
c27004ec 3750 val = ldub_p(buf);
6c2934db 3751 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3752 l = 1;
3753 }
3754 } else {
b448f2f3
FB
3755 unsigned long addr1;
3756 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3757 /* RAM case */
5579c7f3 3758 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3759 memcpy(ptr, buf, l);
3a7d929e
FB
3760 if (!cpu_physical_memory_is_dirty(addr1)) {
3761 /* invalidate code */
3762 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3763 /* set dirty bit */
f7c11b53
YT
3764 cpu_physical_memory_set_dirty_flags(
3765 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3766 }
13eb76e0
FB
3767 }
3768 } else {
5fafdf24 3769 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3770 !(pd & IO_MEM_ROMD)) {
c227f099 3771 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3772 /* I/O case */
3773 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3774 if (p)
6c2934db
AJ
3775 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3776 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3777 /* 32 bit read access */
6c2934db 3778 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3779 stl_p(buf, val);
13eb76e0 3780 l = 4;
6c2934db 3781 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3782 /* 16 bit read access */
6c2934db 3783 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3784 stw_p(buf, val);
13eb76e0
FB
3785 l = 2;
3786 } else {
1c213d19 3787 /* 8 bit read access */
6c2934db 3788 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3789 stb_p(buf, val);
13eb76e0
FB
3790 l = 1;
3791 }
3792 } else {
3793 /* RAM case */
5579c7f3 3794 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3795 (addr & ~TARGET_PAGE_MASK);
3796 memcpy(buf, ptr, l);
3797 }
3798 }
3799 len -= l;
3800 buf += l;
3801 addr += l;
3802 }
3803}
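
/* A minimal usage sketch (not part of exec.c): copying a guest-physical
 * buffer through the slow path above.  GUEST_PADDR is a hypothetical
 * address; the last argument selects the direction.
 */
#if 0
{
    uint8_t buf[64];

    cpu_physical_memory_rw(GUEST_PADDR, buf, sizeof(buf), 0); /* guest -> buf */
    buf[0] ^= 1;
    cpu_physical_memory_rw(GUEST_PADDR, buf, sizeof(buf), 1); /* buf -> guest */
}
#endif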
8df1cd07 3804
d0ecd2aa 3805/* used for ROM loading : can write in RAM and ROM */
c227f099 3806void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3807 const uint8_t *buf, int len)
3808{
3809 int l;
3810 uint8_t *ptr;
c227f099 3811 target_phys_addr_t page;
d0ecd2aa
FB
3812 unsigned long pd;
3813 PhysPageDesc *p;
3b46e624 3814
d0ecd2aa
FB
3815 while (len > 0) {
3816 page = addr & TARGET_PAGE_MASK;
3817 l = (page + TARGET_PAGE_SIZE) - addr;
3818 if (l > len)
3819 l = len;
3820 p = phys_page_find(page >> TARGET_PAGE_BITS);
3821 if (!p) {
3822 pd = IO_MEM_UNASSIGNED;
3823 } else {
3824 pd = p->phys_offset;
3825 }
3b46e624 3826
d0ecd2aa 3827 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3828 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3829 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3830 /* do nothing */
3831 } else {
3832 unsigned long addr1;
3833 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3834 /* ROM/RAM case */
5579c7f3 3835 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3836 memcpy(ptr, buf, l);
3837 }
3838 len -= l;
3839 buf += l;
3840 addr += l;
3841 }
3842}
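
/* Sketch (not part of exec.c): the intended caller is firmware loading code,
 * since a write to a region registered as ROM is ignored by the normal
 * cpu_physical_memory_rw() path.  BIOS_PADDR, bios_blob and bios_size are
 * hypothetical.
 */
#if 0
cpu_physical_memory_write_rom(BIOS_PADDR, bios_blob, bios_size);
#endif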
3843
6d16c2f8
AL
3844typedef struct {
3845 void *buffer;
c227f099
AL
3846 target_phys_addr_t addr;
3847 target_phys_addr_t len;
6d16c2f8
AL
3848} BounceBuffer;
3849
3850static BounceBuffer bounce;
3851
ba223c29
AL
3852typedef struct MapClient {
3853 void *opaque;
3854 void (*callback)(void *opaque);
72cf2d4f 3855 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3856} MapClient;
3857
72cf2d4f
BS
3858static QLIST_HEAD(map_client_list, MapClient) map_client_list
3859 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3860
3861void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3862{
3863 MapClient *client = qemu_malloc(sizeof(*client));
3864
3865 client->opaque = opaque;
3866 client->callback = callback;
72cf2d4f 3867 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3868 return client;
3869}
3870
3871void cpu_unregister_map_client(void *_client)
3872{
3873 MapClient *client = (MapClient *)_client;
3874
72cf2d4f 3875 QLIST_REMOVE(client, link);
34d5e948 3876 qemu_free(client);
ba223c29
AL
3877}
3878
3879static void cpu_notify_map_clients(void)
3880{
3881 MapClient *client;
3882
72cf2d4f
BS
3883 while (!QLIST_EMPTY(&map_client_list)) {
3884 client = QLIST_FIRST(&map_client_list);
ba223c29 3885 client->callback(client->opaque);
34d5e948 3886 cpu_unregister_map_client(client);
ba223c29
AL
3887 }
3888}
3889
6d16c2f8
AL
3890/* Map a physical memory region into a host virtual address.
3891 * May map a subset of the requested range, given by and returned in *plen.
3892 * May return NULL if resources needed to perform the mapping are exhausted.
3893 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3894 * Use cpu_register_map_client() to know when retrying the map operation is
3895 * likely to succeed.
6d16c2f8 3896 */
c227f099
AL
3897void *cpu_physical_memory_map(target_phys_addr_t addr,
3898 target_phys_addr_t *plen,
6d16c2f8
AL
3899 int is_write)
3900{
c227f099
AL
3901 target_phys_addr_t len = *plen;
3902 target_phys_addr_t done = 0;
6d16c2f8
AL
3903 int l;
3904 uint8_t *ret = NULL;
3905 uint8_t *ptr;
c227f099 3906 target_phys_addr_t page;
6d16c2f8
AL
3907 unsigned long pd;
3908 PhysPageDesc *p;
3909 unsigned long addr1;
3910
3911 while (len > 0) {
3912 page = addr & TARGET_PAGE_MASK;
3913 l = (page + TARGET_PAGE_SIZE) - addr;
3914 if (l > len)
3915 l = len;
3916 p = phys_page_find(page >> TARGET_PAGE_BITS);
3917 if (!p) {
3918 pd = IO_MEM_UNASSIGNED;
3919 } else {
3920 pd = p->phys_offset;
3921 }
3922
3923 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3924 if (done || bounce.buffer) {
3925 break;
3926 }
3927 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3928 bounce.addr = addr;
3929 bounce.len = l;
3930 if (!is_write) {
3931 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3932 }
3933 ptr = bounce.buffer;
3934 } else {
3935 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3936 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3937 }
3938 if (!done) {
3939 ret = ptr;
3940 } else if (ret + done != ptr) {
3941 break;
3942 }
3943
3944 len -= l;
3945 addr += l;
3946 done += l;
3947 }
3948 *plen = done;
3949 return ret;
3950}
3951
3952/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3953 * Will also mark the memory as dirty if is_write == 1. access_len gives
3954 * the amount of memory that was actually read or written by the caller.
3955 */
c227f099
AL
3956void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3957 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3958{
3959 if (buffer != bounce.buffer) {
3960 if (is_write) {
e890261f 3961 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3962 while (access_len) {
3963 unsigned l;
3964 l = TARGET_PAGE_SIZE;
3965 if (l > access_len)
3966 l = access_len;
3967 if (!cpu_physical_memory_is_dirty(addr1)) {
3968 /* invalidate code */
3969 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3970 /* set dirty bit */
f7c11b53
YT
3971 cpu_physical_memory_set_dirty_flags(
3972 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3973 }
3974 addr1 += l;
3975 access_len -= l;
3976 }
3977 }
3978 return;
3979 }
3980 if (is_write) {
3981 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3982 }
f8a83245 3983 qemu_vfree(bounce.buffer);
6d16c2f8 3984 bounce.buffer = NULL;
ba223c29 3985 cpu_notify_map_clients();
6d16c2f8 3986}
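
/* A hedged sketch (not part of exec.c) of the zero-copy DMA pattern that
 * cpu_physical_memory_map()/unmap() are meant for, including the bounce
 * buffer retry path.  dma_to_guest(), retry_cb() and the device specifics
 * are hypothetical.
 */
#if 0
static void dma_to_guest(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* The single bounce buffer is in use: ask to be called back when
           a mapping is likely to succeed, then retry from retry_cb(). */
        cpu_register_map_client(NULL, retry_cb);
        return;
    }
    /* ... device deposits up to plen bytes at host ... */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif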
d0ecd2aa 3987
8df1cd07 3988/* warning: addr must be aligned */
c227f099 3989uint32_t ldl_phys(target_phys_addr_t addr)
8df1cd07
FB
3990{
3991 int io_index;
3992 uint8_t *ptr;
3993 uint32_t val;
3994 unsigned long pd;
3995 PhysPageDesc *p;
3996
3997 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3998 if (!p) {
3999 pd = IO_MEM_UNASSIGNED;
4000 } else {
4001 pd = p->phys_offset;
4002 }
3b46e624 4003
5fafdf24 4004 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4005 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4006 /* I/O case */
4007 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4008 if (p)
4009 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4010 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4011 } else {
4012 /* RAM case */
5579c7f3 4013 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
4014 (addr & ~TARGET_PAGE_MASK);
4015 val = ldl_p(ptr);
4016 }
4017 return val;
4018}
4019
84b7b8e7 4020/* warning: addr must be aligned */
c227f099 4021uint64_t ldq_phys(target_phys_addr_t addr)
84b7b8e7
FB
4022{
4023 int io_index;
4024 uint8_t *ptr;
4025 uint64_t val;
4026 unsigned long pd;
4027 PhysPageDesc *p;
4028
4029 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4030 if (!p) {
4031 pd = IO_MEM_UNASSIGNED;
4032 } else {
4033 pd = p->phys_offset;
4034 }
3b46e624 4035
2a4188a3
FB
4036 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4037 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4038 /* I/O case */
4039 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4040 if (p)
4041 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
4042#ifdef TARGET_WORDS_BIGENDIAN
4043 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4044 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4045#else
4046 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4047 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4048#endif
4049 } else {
4050 /* RAM case */
5579c7f3 4051 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
4052 (addr & ~TARGET_PAGE_MASK);
4053 val = ldq_p(ptr);
4054 }
4055 return val;
4056}
4057
aab33094 4058/* XXX: optimize */
c227f099 4059uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4060{
4061 uint8_t val;
4062 cpu_physical_memory_read(addr, &val, 1);
4063 return val;
4064}
4065
733f0b02 4066/* warning: addr must be aligned */
c227f099 4067uint32_t lduw_phys(target_phys_addr_t addr)
aab33094 4068{
733f0b02
MT
4069 int io_index;
4070 uint8_t *ptr;
4071 uint64_t val;
4072 unsigned long pd;
4073 PhysPageDesc *p;
4074
4075 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4076 if (!p) {
4077 pd = IO_MEM_UNASSIGNED;
4078 } else {
4079 pd = p->phys_offset;
4080 }
4081
4082 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4083 !(pd & IO_MEM_ROMD)) {
4084 /* I/O case */
4085 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4086 if (p)
4087 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4088 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
4089 } else {
4090 /* RAM case */
4091 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4092 (addr & ~TARGET_PAGE_MASK);
4093 val = lduw_p(ptr);
4094 }
4095 return val;
aab33094
FB
4096}
4097
8df1cd07
FB
4098/* warning: addr must be aligned. The ram page is not masked as dirty
4099 and the code inside is not invalidated. It is useful if the dirty
4100 bits are used to track modified PTEs */
c227f099 4101void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4102{
4103 int io_index;
4104 uint8_t *ptr;
4105 unsigned long pd;
4106 PhysPageDesc *p;
4107
4108 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4109 if (!p) {
4110 pd = IO_MEM_UNASSIGNED;
4111 } else {
4112 pd = p->phys_offset;
4113 }
3b46e624 4114
3a7d929e 4115 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4116 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4117 if (p)
4118 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4119 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4120 } else {
74576198 4121 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4122 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4123 stl_p(ptr, val);
74576198
AL
4124
4125 if (unlikely(in_migration)) {
4126 if (!cpu_physical_memory_is_dirty(addr1)) {
4127 /* invalidate code */
4128 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4129 /* set dirty bit */
f7c11b53
YT
4130 cpu_physical_memory_set_dirty_flags(
4131 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4132 }
4133 }
8df1cd07
FB
4134 }
4135}
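
/* Sketch (not part of exec.c): the typical consumer is target MMU code that
 * sets accessed/dirty bits in a guest PTE.  Plain stl_phys() would flag the
 * page dirty for QEMU's own bookkeeping write; the _notdirty variant avoids
 * that.  pte_paddr and PTE_ACCESSED are hypothetical.
 */
#if 0
uint32_t pte = ldl_phys(pte_paddr);
if (!(pte & PTE_ACCESSED)) {
    stl_phys_notdirty(pte_paddr, pte | PTE_ACCESSED);
}
#endif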
4136
c227f099 4137void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4138{
4139 int io_index;
4140 uint8_t *ptr;
4141 unsigned long pd;
4142 PhysPageDesc *p;
4143
4144 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4145 if (!p) {
4146 pd = IO_MEM_UNASSIGNED;
4147 } else {
4148 pd = p->phys_offset;
4149 }
3b46e624 4150
bc98a7ef
JM
4151 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4152 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4153 if (p)
4154 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4155#ifdef TARGET_WORDS_BIGENDIAN
4156 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4157 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4158#else
4159 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4160 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4161#endif
4162 } else {
5579c7f3 4163 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4164 (addr & ~TARGET_PAGE_MASK);
4165 stq_p(ptr, val);
4166 }
4167}
4168
8df1cd07 4169/* warning: addr must be aligned */
c227f099 4170void stl_phys(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4171{
4172 int io_index;
4173 uint8_t *ptr;
4174 unsigned long pd;
4175 PhysPageDesc *p;
4176
4177 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4178 if (!p) {
4179 pd = IO_MEM_UNASSIGNED;
4180 } else {
4181 pd = p->phys_offset;
4182 }
3b46e624 4183
3a7d929e 4184 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4185 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4186 if (p)
4187 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4188 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4189 } else {
4190 unsigned long addr1;
4191 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4192 /* RAM case */
5579c7f3 4193 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4194 stl_p(ptr, val);
3a7d929e
FB
4195 if (!cpu_physical_memory_is_dirty(addr1)) {
4196 /* invalidate code */
4197 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4198 /* set dirty bit */
f7c11b53
YT
4199 cpu_physical_memory_set_dirty_flags(addr1,
4200 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4201 }
8df1cd07
FB
4202 }
4203}
4204
aab33094 4205/* XXX: optimize */
c227f099 4206void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4207{
4208 uint8_t v = val;
4209 cpu_physical_memory_write(addr, &v, 1);
4210}
4211
733f0b02 4212/* warning: addr must be aligned */
c227f099 4213void stw_phys(target_phys_addr_t addr, uint32_t val)
aab33094 4214{
733f0b02
MT
4215 int io_index;
4216 uint8_t *ptr;
4217 unsigned long pd;
4218 PhysPageDesc *p;
4219
4220 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4221 if (!p) {
4222 pd = IO_MEM_UNASSIGNED;
4223 } else {
4224 pd = p->phys_offset;
4225 }
4226
4227 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4228 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4229 if (p)
4230 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4231 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4232 } else {
4233 unsigned long addr1;
4234 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4235 /* RAM case */
4236 ptr = qemu_get_ram_ptr(addr1);
4237 stw_p(ptr, val);
4238 if (!cpu_physical_memory_is_dirty(addr1)) {
4239 /* invalidate code */
4240 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4241 /* set dirty bit */
4242 cpu_physical_memory_set_dirty_flags(addr1,
4243 (0xff & ~CODE_DIRTY_FLAG));
4244 }
4245 }
aab33094
FB
4246}
4247
4248/* XXX: optimize */
c227f099 4249void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4250{
4251 val = tswap64(val);
4252 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
4253}
4254
5e2972fd 4255/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4256int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4257 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4258{
4259 int l;
c227f099 4260 target_phys_addr_t phys_addr;
9b3c35e0 4261 target_ulong page;
13eb76e0
FB
4262
4263 while (len > 0) {
4264 page = addr & TARGET_PAGE_MASK;
4265 phys_addr = cpu_get_phys_page_debug(env, page);
4266 /* if no physical page mapped, return an error */
4267 if (phys_addr == -1)
4268 return -1;
4269 l = (page + TARGET_PAGE_SIZE) - addr;
4270 if (l > len)
4271 l = len;
5e2972fd 4272 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4273 if (is_write)
4274 cpu_physical_memory_write_rom(phys_addr, buf, l);
4275 else
5e2972fd 4276 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4277 len -= l;
4278 buf += l;
4279 addr += l;
4280 }
4281 return 0;
4282}
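
/* A minimal usage sketch (not part of exec.c): a debugger-style consumer
 * (e.g. the gdb stub) reading guest virtual memory.  env and vaddr are
 * hypothetical.
 */
#if 0
uint8_t insn[4];

if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
    /* no physical page is mapped at vaddr */
}
#endif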
a68fe89c 4283#endif
13eb76e0 4284
2e70f6ef
PB
4285/* in deterministic execution mode, instructions doing device I/Os
4286 must be at the end of the TB */
4287void cpu_io_recompile(CPUState *env, void *retaddr)
4288{
4289 TranslationBlock *tb;
4290 uint32_t n, cflags;
4291 target_ulong pc, cs_base;
4292 uint64_t flags;
4293
4294 tb = tb_find_pc((unsigned long)retaddr);
4295 if (!tb) {
4296 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4297 retaddr);
4298 }
4299 n = env->icount_decr.u16.low + tb->icount;
4300 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
4301 /* Calculate how many instructions had been executed before the fault
bf20dc07 4302 occurred. */
2e70f6ef
PB
4303 n = n - env->icount_decr.u16.low;
4304 /* Generate a new TB ending on the I/O insn. */
4305 n++;
4306 /* On MIPS and SH, delay slot instructions can only be restarted if
4307 they were already the first instruction in the TB. If this is not
bf20dc07 4308 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4309 branch. */
4310#if defined(TARGET_MIPS)
4311 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4312 env->active_tc.PC -= 4;
4313 env->icount_decr.u16.low++;
4314 env->hflags &= ~MIPS_HFLAG_BMASK;
4315 }
4316#elif defined(TARGET_SH4)
4317 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4318 && n > 1) {
4319 env->pc -= 2;
4320 env->icount_decr.u16.low++;
4321 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4322 }
4323#endif
4324 /* This should never happen. */
4325 if (n > CF_COUNT_MASK)
4326 cpu_abort(env, "TB too big during recompile");
4327
4328 cflags = n | CF_LAST_IO;
4329 pc = tb->pc;
4330 cs_base = tb->cs_base;
4331 flags = tb->flags;
4332 tb_phys_invalidate(tb, -1);
4333 /* FIXME: In theory this could raise an exception. In practice
4334 we have already translated the block once so it's probably ok. */
4335 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4336 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4337 the first in the TB) then we end up generating a whole new TB and
4338 repeating the fault, which is horribly inefficient.
4339 Better would be to execute just this insn uncached, or generate a
4340 second new TB. */
4341 cpu_resume_from_signal(env, NULL);
4342}
4343
b3755a91
PB
4344#if !defined(CONFIG_USER_ONLY)
4345
055403b2 4346void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4347{
4348 int i, target_code_size, max_target_code_size;
4349 int direct_jmp_count, direct_jmp2_count, cross_page;
4350 TranslationBlock *tb;
3b46e624 4351
e3db7226
FB
4352 target_code_size = 0;
4353 max_target_code_size = 0;
4354 cross_page = 0;
4355 direct_jmp_count = 0;
4356 direct_jmp2_count = 0;
4357 for(i = 0; i < nb_tbs; i++) {
4358 tb = &tbs[i];
4359 target_code_size += tb->size;
4360 if (tb->size > max_target_code_size)
4361 max_target_code_size = tb->size;
4362 if (tb->page_addr[1] != -1)
4363 cross_page++;
4364 if (tb->tb_next_offset[0] != 0xffff) {
4365 direct_jmp_count++;
4366 if (tb->tb_next_offset[1] != 0xffff) {
4367 direct_jmp2_count++;
4368 }
4369 }
4370 }
4371 /* XXX: avoid using doubles ? */
57fec1fe 4372 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4373 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4374 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4375 cpu_fprintf(f, "TB count %d/%d\n",
4376 nb_tbs, code_gen_max_blocks);
5fafdf24 4377 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4378 nb_tbs ? target_code_size / nb_tbs : 0,
4379 max_target_code_size);
055403b2 4380 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4381 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4382 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4383 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4384 cross_page,
e3db7226
FB
4385 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4386 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4387 direct_jmp_count,
e3db7226
FB
4388 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4389 direct_jmp2_count,
4390 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4391 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4392 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4393 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4394 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4395 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4396}
4397
61382a50
FB
4398#define MMUSUFFIX _cmmu
4399#define GETPC() NULL
4400#define env cpu_single_env
b769d8fe 4401#define SOFTMMU_CODE_ACCESS
61382a50
FB
4402
4403#define SHIFT 0
4404#include "softmmu_template.h"
4405
4406#define SHIFT 1
4407#include "softmmu_template.h"
4408
4409#define SHIFT 2
4410#include "softmmu_template.h"
4411
4412#define SHIFT 3
4413#include "softmmu_template.h"
4414
4415#undef env
4416
4417#endif