/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
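
/* Illustrative sizing (added commentary, not in the original source):
   with L2_BITS = 10, TARGET_PAGE_BITS = 12 and a 32-bit address space,
   V_L1_BITS_REM is (32 - 12) % 10 = 0, so V_L1_BITS becomes 0 + 10 = 10,
   V_L1_SIZE is 1024 entries and V_L1_SHIFT is 32 - 12 - 10 = 10: the
   20-bit page index is resolved in two levels of 10 bits each. */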

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
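
/* Illustrative usage (added commentary, not in the original source):
   callers index by page number, e.g. page_find(addr >> TARGET_PAGE_BITS).
   The result is NULL when no TB has touched that page yet, since
   page_find() never allocates intermediate levels. */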

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
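
/* Illustrative sizing (added commentary, not in the original source):
   with the default 32MB buffer, code_gen_max_blocks above caps how many
   TranslationBlock descriptors are preallocated in tbs[], one per
   CODE_GEN_AVG_BLOCK_SIZE bytes of buffer.  tb_alloc() below returns
   NULL once either the descriptors or the buffer space run out, which
   forces a tb_flush(). */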

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

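/* Note on the tagged pointers below (added commentary, not in the
   original source): page_next[] and jmp_first/jmp_next[] store a TB
   pointer with its low two bits borrowed as a tag.  Values 0 and 1
   select which of the TB's two pages (or jump slots) the link belongs
   to, and 2 marks the head of the circular jump list, which is why the
   list walkers mask with ~3 and test n1 == 2. */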
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

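/* Worked example for set_bits() (added commentary, not in the original
   source): set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs tab[0] with
   0xf8 (bits 3-7) and tab[1] with 0x03 (bits 8-9).  build_page_bitmap()
   below uses it to record which bytes of a page hold translated code. */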
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
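
/* Note (added commentary, not in the original source): the binary search
   above relies on tbs[] being sorted by tc_ptr, which holds because
   translated code is handed out sequentially from code_gen_buffer and
   both are reset together by tb_flush().  A tc_ptr pointing into the
   middle of a block resolves to that block via the final &tbs[m_max]. */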

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
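
/* Illustrative usage (added commentary, not in the original source),
   e.g. servicing a gdbstub write-watchpoint request; BP_MEM_WRITE is an
   assumed flag name taken from the gdbstub flag set:

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE,
                                 &wp) < 0) {
           // failed: len was not 1/2/4/8 or addr was not len-aligned
       }
*/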

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

f6f3fbca
MT
1708#ifndef CONFIG_USER_ONLY
1709static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 = QLIST_HEAD_INITIALIZER(memory_client_list);
1711
1712static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1713 ram_addr_t size,
0fd542fb
MT
1714 ram_addr_t phys_offset,
1715 bool log_dirty)
f6f3fbca
MT
1716{
1717 CPUPhysMemoryClient *client;
1718 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1719 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1720 }
1721}
1722
1723static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1724 target_phys_addr_t end)
f6f3fbca
MT
1725{
1726 CPUPhysMemoryClient *client;
1727 QLIST_FOREACH(client, &memory_client_list, list) {
1728 int r = client->sync_dirty_bitmap(client, start, end);
1729 if (r < 0)
1730 return r;
1731 }
1732 return 0;
1733}
1734
1735static int cpu_notify_migration_log(int enable)
1736{
1737 CPUPhysMemoryClient *client;
1738 QLIST_FOREACH(client, &memory_client_list, list) {
1739 int r = client->migration_log(client, enable);
1740 if (r < 0)
1741 return r;
1742 }
1743 return 0;
1744}
1745
5cd2c5b6
RH
1746static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1747 int level, void **lp)
f6f3fbca 1748{
5cd2c5b6 1749 int i;
f6f3fbca 1750
5cd2c5b6
RH
1751 if (*lp == NULL) {
1752 return;
1753 }
1754 if (level == 0) {
1755 PhysPageDesc *pd = *lp;
7296abac 1756 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1757 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1758 client->set_memory(client, pd[i].region_offset,
0fd542fb 1759 TARGET_PAGE_SIZE, pd[i].phys_offset, false);
f6f3fbca 1760 }
5cd2c5b6
RH
1761 }
1762 } else {
1763 void **pp = *lp;
7296abac 1764 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1765 phys_page_for_each_1(client, level - 1, pp + i);
f6f3fbca
MT
1766 }
1767 }
1768}
1769
1770static void phys_page_for_each(CPUPhysMemoryClient *client)
1771{
5cd2c5b6
RH
1772 int i;
1773 for (i = 0; i < P_L1_SIZE; ++i) {
1774 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
c2f42bf0 1775 l1_phys_map + i);
f6f3fbca 1776 }
f6f3fbca
MT
1777}
1778
1779void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1780{
1781 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1782 phys_page_for_each(client);
1783}
1784
1785void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1786{
1787 QLIST_REMOVE(client, list);
1788}
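/* Illustrative sketch, not part of exec.c: the smallest useful
   CPUPhysMemoryClient. The callback shapes follow the calls above
   (cpu_notify_set_memory, cpu_notify_sync_dirty_bitmap,
   cpu_notify_migration_log); the example_* names are hypothetical.
   Registering the client replays the whole physical page table through
   phys_page_for_each(), so set_memory sees existing mappings at once. */
#if 0
static void example_set_memory(CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size, ram_addr_t phys_offset,
                               bool log_dirty)
{
    /* react to the mapping change, e.g. update an accelerator slot */
}

static int example_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                     target_phys_addr_t start,
                                     target_phys_addr_t end)
{
    return 0; /* nothing to sync in this stub */
}

static int example_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0; /* ignore migration-log toggling */
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};

/* during setup: cpu_register_phys_memory_client(&example_client); */
#endif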
1789#endif
1790
f193c797
FB
1791static int cmp1(const char *s1, int n, const char *s2)
1792{
1793 if (strlen(s2) != n)
1794 return 0;
1795 return memcmp(s1, s2, n) == 0;
1796}
3b46e624 1797
f193c797
FB
1798/* takes a comma-separated list of log masks. Returns 0 if error. */
1799int cpu_str_to_log_mask(const char *str)
1800{
c7cd6a37 1801 const CPULogItem *item;
f193c797
FB
1802 int mask;
1803 const char *p, *p1;
1804
1805 p = str;
1806 mask = 0;
1807 for(;;) {
1808 p1 = strchr(p, ',');
1809 if (!p1)
1810 p1 = p + strlen(p);
9742bf26
YT
1811 if (cmp1(p, p1 - p, "all")) {
1812 for(item = cpu_log_items; item->mask != 0; item++) {
1813 mask |= item->mask;
1814 }
1815 } else {
1816 for(item = cpu_log_items; item->mask != 0; item++) {
1817 if (cmp1(p, p1 - p, item->name))
1818 goto found;
1819 }
1820 return 0;
f193c797 1821 }
f193c797
FB
1822 found:
1823 mask |= item->mask;
1824 if (*p1 != ',')
1825 break;
1826 p = p1 + 1;
1827 }
1828 return mask;
1829}
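/* Usage sketch (hypothetical caller, assuming the cpu_set_log() helper
   from QEMU's logging code): turn a "-d"-style option string into a
   mask and reject unknown items. */
#if 0
    int mask = cpu_str_to_log_mask("exec,cpu");
    if (mask == 0) {
        fprintf(stderr, "unknown log item\n");
    } else {
        cpu_set_log(mask);
    }
#endif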
ea041c0e 1830
7501267e
FB
1831void cpu_abort(CPUState *env, const char *fmt, ...)
1832{
1833 va_list ap;
493ae1f0 1834 va_list ap2;
7501267e
FB
1835
1836 va_start(ap, fmt);
493ae1f0 1837 va_copy(ap2, ap);
7501267e
FB
1838 fprintf(stderr, "qemu: fatal: ");
1839 vfprintf(stderr, fmt, ap);
1840 fprintf(stderr, "\n");
1841#ifdef TARGET_I386
7fe48483
FB
1842 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1843#else
1844 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1845#endif
93fcfe39
AL
1846 if (qemu_log_enabled()) {
1847 qemu_log("qemu: fatal: ");
1848 qemu_log_vprintf(fmt, ap2);
1849 qemu_log("\n");
f9373291 1850#ifdef TARGET_I386
93fcfe39 1851 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1852#else
93fcfe39 1853 log_cpu_state(env, 0);
f9373291 1854#endif
31b1a7b4 1855 qemu_log_flush();
93fcfe39 1856 qemu_log_close();
924edcae 1857 }
493ae1f0 1858 va_end(ap2);
f9373291 1859 va_end(ap);
fd052bf6
RV
1860#if defined(CONFIG_USER_ONLY)
1861 {
1862 struct sigaction act;
1863 sigfillset(&act.sa_mask);
1864 act.sa_handler = SIG_DFL;
1865 sigaction(SIGABRT, &act, NULL);
1866 }
1867#endif
7501267e
FB
1868 abort();
1869}
1870
c5be9f08
TS
1871CPUState *cpu_copy(CPUState *env)
1872{
01ba9816 1873 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1874 CPUState *next_cpu = new_env->next_cpu;
1875 int cpu_index = new_env->cpu_index;
5a38f081
AL
1876#if defined(TARGET_HAS_ICE)
1877 CPUBreakpoint *bp;
1878 CPUWatchpoint *wp;
1879#endif
1880
c5be9f08 1881 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1882
1883 /* Preserve chaining and index. */
c5be9f08
TS
1884 new_env->next_cpu = next_cpu;
1885 new_env->cpu_index = cpu_index;
5a38f081
AL
1886
1887 /* Clone all break/watchpoints.
1888 Note: Once we support ptrace with hw-debug register access, make sure
1889 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1890 QTAILQ_INIT(&new_env->breakpoints);
1891 QTAILQ_INIT(&new_env->watchpoints);
5a38f081 1892#if defined(TARGET_HAS_ICE)
72cf2d4f 1893 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1894 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1895 }
72cf2d4f 1896 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1897 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1898 wp->flags, NULL);
1899 }
1900#endif
1901
c5be9f08
TS
1902 return new_env;
1903}
1904
0124311e
FB
1905#if !defined(CONFIG_USER_ONLY)
1906
5c751e99
EI
1907static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1908{
1909 unsigned int i;
1910
1911 /* Discard jump cache entries for any tb which might potentially
1912 overlap the flushed page. */
1913 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1914 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1915 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1916
1917 i = tb_jmp_cache_hash_page(addr);
1918 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1919 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1920}
1921
08738984
IK
1922static CPUTLBEntry s_cputlb_empty_entry = {
1923 .addr_read = -1,
1924 .addr_write = -1,
1925 .addr_code = -1,
1926 .addend = -1,
1927};
1928
ee8b7021
FB
1929/* NOTE: if flush_global is true, also flush global entries (not
1930 implemented yet) */
1931void tlb_flush(CPUState *env, int flush_global)
33417e70 1932{
33417e70 1933 int i;
0124311e 1934
9fa3e853
FB
1935#if defined(DEBUG_TLB)
1936 printf("tlb_flush:\n");
1937#endif
0124311e
FB
1938 /* must reset current TB so that interrupts cannot modify the
1939 links while we are modifying them */
1940 env->current_tb = NULL;
1941
33417e70 1942 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1943 int mmu_idx;
1944 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1945 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1946 }
33417e70 1947 }
9fa3e853 1948
8a40a180 1949 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1950
d4c430a8
PB
1951 env->tlb_flush_addr = -1;
1952 env->tlb_flush_mask = 0;
e3db7226 1953 tlb_flush_count++;
33417e70
FB
1954}
1955
274da6b2 1956static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1957{
5fafdf24 1958 if (addr == (tlb_entry->addr_read &
84b7b8e7 1959 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1960 addr == (tlb_entry->addr_write &
84b7b8e7 1961 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1962 addr == (tlb_entry->addr_code &
84b7b8e7 1963 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1964 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1965 }
61382a50
FB
1966}
1967
2e12669a 1968void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1969{
8a40a180 1970 int i;
cfde4bd9 1971 int mmu_idx;
0124311e 1972
9fa3e853 1973#if defined(DEBUG_TLB)
108c49b8 1974 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1975#endif
d4c430a8
PB
1976 /* Check if we need to flush due to large pages. */
1977 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1978#if defined(DEBUG_TLB)
1979 printf("tlb_flush_page: forced full flush ("
1980 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1981 env->tlb_flush_addr, env->tlb_flush_mask);
1982#endif
1983 tlb_flush(env, 1);
1984 return;
1985 }
0124311e
FB
1986 /* must reset current TB so that interrupts cannot modify the
1987 links while we are modifying them */
1988 env->current_tb = NULL;
61382a50
FB
1989
1990 addr &= TARGET_PAGE_MASK;
1991 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1992 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1993 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1994
5c751e99 1995 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1996}
1997
9fa3e853
FB
1998/* update the TLBs so that writes to code in the virtual page 'addr'
1999 can be detected */
c227f099 2000static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2001{
5fafdf24 2002 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2003 ram_addr + TARGET_PAGE_SIZE,
2004 CODE_DIRTY_FLAG);
9fa3e853
FB
2005}
2006
9fa3e853 2007/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2008 tested for self-modifying code */
c227f099 2009static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2010 target_ulong vaddr)
9fa3e853 2011{
f7c11b53 2012 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2013}
2014
5fafdf24 2015static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2016 unsigned long start, unsigned long length)
2017{
2018 unsigned long addr;
84b7b8e7
FB
2019 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2020 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2021 if ((addr - start) < length) {
0f459d16 2022 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2023 }
2024 }
2025}
2026
5579c7f3 2027/* Note: start and end must be within the same ram block. */
c227f099 2028void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2029 int dirty_flags)
1ccde1cb
FB
2030{
2031 CPUState *env;
4f2ac237 2032 unsigned long length, start1;
f7c11b53 2033 int i;
1ccde1cb
FB
2034
2035 start &= TARGET_PAGE_MASK;
2036 end = TARGET_PAGE_ALIGN(end);
2037
2038 length = end - start;
2039 if (length == 0)
2040 return;
f7c11b53 2041 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2042
1ccde1cb
FB
2043 /* we modify the TLB cache so that the dirty bit will be set again
2044 when accessing the range */
b2e0a138 2045 start1 = (unsigned long)qemu_safe_ram_ptr(start);
5579c7f3
PB
2046 /* Check that we don't span multiple blocks - this breaks the
2047 address comparisons below. */
b2e0a138 2048 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2049 != (end - 1) - start) {
2050 abort();
2051 }
2052
6a00d601 2053 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2054 int mmu_idx;
2055 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2056 for(i = 0; i < CPU_TLB_SIZE; i++)
2057 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2058 start1, length);
2059 }
6a00d601 2060 }
1ccde1cb
FB
2061}
2062
74576198
AL
2063int cpu_physical_memory_set_dirty_tracking(int enable)
2064{
f6f3fbca 2065 int ret = 0;
74576198 2066 in_migration = enable;
f6f3fbca
MT
2067 ret = cpu_notify_migration_log(!!enable);
2068 return ret;
74576198
AL
2069}
2070
2071int cpu_physical_memory_get_dirty_tracking(void)
2072{
2073 return in_migration;
2074}
2075
c227f099
AL
2076int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2077 target_phys_addr_t end_addr)
2bec46dc 2078{
7b8f3b78 2079 int ret;
151f7749 2080
f6f3fbca 2081 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2082 return ret;
2bec46dc
AL
2083}
2084
e5896b12
AP
2085int cpu_physical_log_start(target_phys_addr_t start_addr,
2086 ram_addr_t size)
2087{
2088 CPUPhysMemoryClient *client;
2089 QLIST_FOREACH(client, &memory_client_list, list) {
2090 if (client->log_start) {
2091 int r = client->log_start(client, start_addr, size);
2092 if (r < 0) {
2093 return r;
2094 }
2095 }
2096 }
2097 return 0;
2098}
2099
2100int cpu_physical_log_stop(target_phys_addr_t start_addr,
2101 ram_addr_t size)
2102{
2103 CPUPhysMemoryClient *client;
2104 QLIST_FOREACH(client, &memory_client_list, list) {
2105 if (client->log_stop) {
2106 int r = client->log_stop(client, start_addr, size);
2107 if (r < 0) {
2108 return r;
2109 }
2110 }
2111 }
2112 return 0;
2113}
2114
3a7d929e
FB
2115static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2116{
c227f099 2117 ram_addr_t ram_addr;
5579c7f3 2118 void *p;
3a7d929e 2119
84b7b8e7 2120 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2121 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2122 + tlb_entry->addend);
e890261f 2123 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2124 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2125 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2126 }
2127 }
2128}
2129
2130/* update the TLB according to the current state of the dirty bits */
2131void cpu_tlb_update_dirty(CPUState *env)
2132{
2133 int i;
cfde4bd9
IY
2134 int mmu_idx;
2135 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2136 for(i = 0; i < CPU_TLB_SIZE; i++)
2137 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2138 }
3a7d929e
FB
2139}
2140
0f459d16 2141static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2142{
0f459d16
PB
2143 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2144 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2145}
2146
0f459d16
PB
2147/* update the TLB corresponding to virtual page vaddr
2148 so that it is no longer dirty */
2149static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2150{
1ccde1cb 2151 int i;
cfde4bd9 2152 int mmu_idx;
1ccde1cb 2153
0f459d16 2154 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2155 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2156 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2157 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2158}
2159
d4c430a8
PB
2160/* Our TLB does not support large pages, so remember the area covered by
2161 large pages and trigger a full TLB flush if these are invalidated. */
2162static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2163 target_ulong size)
2164{
2165 target_ulong mask = ~(size - 1);
2166
2167 if (env->tlb_flush_addr == (target_ulong)-1) {
2168 env->tlb_flush_addr = vaddr & mask;
2169 env->tlb_flush_mask = mask;
2170 return;
2171 }
2172 /* Extend the existing region to include the new page.
2173 This is a compromise between unnecessary flushes and the cost
2174 of maintaining a full variable size TLB. */
2175 mask &= env->tlb_flush_mask;
2176 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2177 mask <<= 1;
2178 }
2179 env->tlb_flush_addr &= mask;
2180 env->tlb_flush_mask = mask;
2181}
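/* Worked example (illustrative): with 4K target pages, mapping a 2M page
   at vaddr 0x00200000 records tlb_flush_addr = 0x00200000 and
   tlb_flush_mask = 0xffe00000. A second 2M page at 0x00600000 widens the
   mask one bit at a time until both addresses share the prefix: the mask
   becomes 0xff800000 and tlb_flush_addr 0x00000000, so a single region
   still covers both large pages. */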
2182
2183/* Add a new TLB entry. At most one entry for a given virtual address
2184 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2185 supplied size is only used by tlb_flush_page. */
2186void tlb_set_page(CPUState *env, target_ulong vaddr,
2187 target_phys_addr_t paddr, int prot,
2188 int mmu_idx, target_ulong size)
9fa3e853 2189{
92e873b9 2190 PhysPageDesc *p;
4f2ac237 2191 unsigned long pd;
9fa3e853 2192 unsigned int index;
4f2ac237 2193 target_ulong address;
0f459d16 2194 target_ulong code_address;
355b1943 2195 unsigned long addend;
84b7b8e7 2196 CPUTLBEntry *te;
a1d1bb31 2197 CPUWatchpoint *wp;
c227f099 2198 target_phys_addr_t iotlb;
9fa3e853 2199
d4c430a8
PB
2200 assert(size >= TARGET_PAGE_SIZE);
2201 if (size != TARGET_PAGE_SIZE) {
2202 tlb_add_large_page(env, vaddr, size);
2203 }
92e873b9 2204 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2205 if (!p) {
2206 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2207 } else {
2208 pd = p->phys_offset;
9fa3e853
FB
2209 }
2210#if defined(DEBUG_TLB)
7fd3f494
SW
2211 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2212 " prot=%x idx=%d pd=0x%08lx\n",
2213 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2214#endif
2215
0f459d16
PB
2216 address = vaddr;
2217 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2218 /* IO memory case (romd handled later) */
2219 address |= TLB_MMIO;
2220 }
5579c7f3 2221 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2222 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2223 /* Normal RAM. */
2224 iotlb = pd & TARGET_PAGE_MASK;
2225 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2226 iotlb |= IO_MEM_NOTDIRTY;
2227 else
2228 iotlb |= IO_MEM_ROM;
2229 } else {
ccbb4d44 2230 /* IO handlers are currently passed a physical address.
0f459d16
PB
2231 It would be nice to pass an offset from the base address
2232 of that region. This would avoid having to special case RAM,
2233 and avoid full address decoding in every device.
2234 We can't use the high bits of pd for this because
2235 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2236 iotlb = (pd & ~TARGET_PAGE_MASK);
2237 if (p) {
8da3ff18
PB
2238 iotlb += p->region_offset;
2239 } else {
2240 iotlb += paddr;
2241 }
0f459d16
PB
2242 }
2243
2244 code_address = address;
2245 /* Make accesses to pages with watchpoints go via the
2246 watchpoint trap routines. */
72cf2d4f 2247 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2248 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2249 /* Avoid trapping reads of pages with a write breakpoint. */
2250 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2251 iotlb = io_mem_watch + paddr;
2252 address |= TLB_MMIO;
2253 break;
2254 }
6658ffb8 2255 }
0f459d16 2256 }
d79acba4 2257
0f459d16
PB
2258 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2259 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2260 te = &env->tlb_table[mmu_idx][index];
2261 te->addend = addend - vaddr;
2262 if (prot & PAGE_READ) {
2263 te->addr_read = address;
2264 } else {
2265 te->addr_read = -1;
2266 }
5c751e99 2267
0f459d16
PB
2268 if (prot & PAGE_EXEC) {
2269 te->addr_code = code_address;
2270 } else {
2271 te->addr_code = -1;
2272 }
2273 if (prot & PAGE_WRITE) {
2274 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2275 (pd & IO_MEM_ROMD)) {
2276 /* Write access calls the I/O callback. */
2277 te->addr_write = address | TLB_MMIO;
2278 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2279 !cpu_physical_memory_is_dirty(pd)) {
2280 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2281 } else {
0f459d16 2282 te->addr_write = address;
9fa3e853 2283 }
0f459d16
PB
2284 } else {
2285 te->addr_write = -1;
9fa3e853 2286 }
9fa3e853
FB
2287}
2288
0124311e
FB
2289#else
2290
ee8b7021 2291void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2292{
2293}
2294
2e12669a 2295void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2296{
2297}
2298
edf8e2af
MW
2299/*
2300 * Walks guest process memory "regions" one by one
2301 * and calls callback function 'fn' for each region.
2302 */
5cd2c5b6
RH
2303
2304struct walk_memory_regions_data
2305{
2306 walk_memory_regions_fn fn;
2307 void *priv;
2308 unsigned long start;
2309 int prot;
2310};
2311
2312static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2313 abi_ulong end, int new_prot)
5cd2c5b6
RH
2314{
2315 if (data->start != -1ul) {
2316 int rc = data->fn(data->priv, data->start, end, data->prot);
2317 if (rc != 0) {
2318 return rc;
2319 }
2320 }
2321
2322 data->start = (new_prot ? end : -1ul);
2323 data->prot = new_prot;
2324
2325 return 0;
2326}
2327
2328static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2329 abi_ulong base, int level, void **lp)
5cd2c5b6 2330{
b480d9b7 2331 abi_ulong pa;
5cd2c5b6
RH
2332 int i, rc;
2333
2334 if (*lp == NULL) {
2335 return walk_memory_regions_end(data, base, 0);
2336 }
2337
2338 if (level == 0) {
2339 PageDesc *pd = *lp;
7296abac 2340 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2341 int prot = pd[i].flags;
2342
2343 pa = base | (i << TARGET_PAGE_BITS);
2344 if (prot != data->prot) {
2345 rc = walk_memory_regions_end(data, pa, prot);
2346 if (rc != 0) {
2347 return rc;
9fa3e853 2348 }
9fa3e853 2349 }
5cd2c5b6
RH
2350 }
2351 } else {
2352 void **pp = *lp;
7296abac 2353 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2354 pa = base | ((abi_ulong)i <<
2355 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2356 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2357 if (rc != 0) {
2358 return rc;
2359 }
2360 }
2361 }
2362
2363 return 0;
2364}
2365
2366int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2367{
2368 struct walk_memory_regions_data data;
2369 unsigned long i;
2370
2371 data.fn = fn;
2372 data.priv = priv;
2373 data.start = -1ul;
2374 data.prot = 0;
2375
2376 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2377 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2378 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2379 if (rc != 0) {
2380 return rc;
9fa3e853 2381 }
33417e70 2382 }
5cd2c5b6
RH
2383
2384 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2385}
2386
b480d9b7
PB
2387static int dump_region(void *priv, abi_ulong start,
2388 abi_ulong end, unsigned long prot)
edf8e2af
MW
2389{
2390 FILE *f = (FILE *)priv;
2391
b480d9b7
PB
2392 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2393 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2394 start, end, end - start,
2395 ((prot & PAGE_READ) ? 'r' : '-'),
2396 ((prot & PAGE_WRITE) ? 'w' : '-'),
2397 ((prot & PAGE_EXEC) ? 'x' : '-'));
2398
2399 return (0);
2400}
2401
2402/* dump memory mappings */
2403void page_dump(FILE *f)
2404{
2405 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2406 "start", "end", "size", "prot");
2407 walk_memory_regions(f, dump_region);
33417e70
FB
2408}
2409
53a5960a 2410int page_get_flags(target_ulong address)
33417e70 2411{
9fa3e853
FB
2412 PageDesc *p;
2413
2414 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2415 if (!p)
9fa3e853
FB
2416 return 0;
2417 return p->flags;
2418}
2419
376a7909
RH
2420/* Modify the flags of a page and invalidate the code if necessary.
2421 The flag PAGE_WRITE_ORG is positioned automatically depending
2422 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2423void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2424{
376a7909
RH
2425 target_ulong addr, len;
2426
2427 /* This function should never be called with addresses outside the
2428 guest address space. If this assert fires, it probably indicates
2429 a missing call to h2g_valid. */
b480d9b7
PB
2430#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2431 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2432#endif
2433 assert(start < end);
9fa3e853
FB
2434
2435 start = start & TARGET_PAGE_MASK;
2436 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2437
2438 if (flags & PAGE_WRITE) {
9fa3e853 2439 flags |= PAGE_WRITE_ORG;
376a7909
RH
2440 }
2441
2442 for (addr = start, len = end - start;
2443 len != 0;
2444 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2445 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2446
2447 /* If the write protection bit is set, then we invalidate
2448 the code inside. */
5fafdf24 2449 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2450 (flags & PAGE_WRITE) &&
2451 p->first_tb) {
d720b93d 2452 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2453 }
2454 p->flags = flags;
2455 }
33417e70
FB
2456}
2457
3d97b40b
TS
2458int page_check_range(target_ulong start, target_ulong len, int flags)
2459{
2460 PageDesc *p;
2461 target_ulong end;
2462 target_ulong addr;
2463
376a7909
RH
2464 /* This function should never be called with addresses outside the
2465 guest address space. If this assert fires, it probably indicates
2466 a missing call to h2g_valid. */
338e9e6c
BS
2467#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2468 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2469#endif
2470
3e0650a9
RH
2471 if (len == 0) {
2472 return 0;
2473 }
376a7909
RH
2474 if (start + len - 1 < start) {
2475 /* We've wrapped around. */
55f280c9 2476 return -1;
376a7909 2477 }
55f280c9 2478
3d97b40b
TS
2479 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2480 start = start & TARGET_PAGE_MASK;
2481
376a7909
RH
2482 for (addr = start, len = end - start;
2483 len != 0;
2484 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2485 p = page_find(addr >> TARGET_PAGE_BITS);
2486 if (!p)
2487 return -1;
2488 if (!(p->flags & PAGE_VALID))
2489 return -1;
2490
dae3270c 2491 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2492 return -1;
dae3270c
FB
2493 if (flags & PAGE_WRITE) {
2494 if (!(p->flags & PAGE_WRITE_ORG))
2495 return -1;
2496 /* unprotect the page if it was put read-only because it
2497 contains translated code */
2498 if (!(p->flags & PAGE_WRITE)) {
2499 if (!page_unprotect(addr, 0, NULL))
2500 return -1;
2501 }
2502 return 0;
2503 }
3d97b40b
TS
2504 }
2505 return 0;
2506}
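/* Hypothetical caller sketch: the usual linux-user pattern of validating
   a guest buffer before touching it. TARGET_EFAULT is the errno value
   used by the syscall emulation. */
#if 0
    if (page_check_range(guest_addr, len, PAGE_WRITE) < 0) {
        return -TARGET_EFAULT;
    }
    /* all pages are PAGE_VALID and writable; safe to store len bytes */
#endif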
2507
9fa3e853 2508/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2509 page. Return TRUE if the fault was successfully handled. */
53a5960a 2510int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2511{
45d679d6
AJ
2512 unsigned int prot;
2513 PageDesc *p;
53a5960a 2514 target_ulong host_start, host_end, addr;
9fa3e853 2515
c8a706fe
PB
2516 /* Technically this isn't safe inside a signal handler. However we
2517 know this only ever happens in a synchronous SEGV handler, so in
2518 practice it seems to be ok. */
2519 mmap_lock();
2520
45d679d6
AJ
2521 p = page_find(address >> TARGET_PAGE_BITS);
2522 if (!p) {
c8a706fe 2523 mmap_unlock();
9fa3e853 2524 return 0;
c8a706fe 2525 }
45d679d6 2526
9fa3e853
FB
2527 /* if the page was really writable, then we change its
2528 protection back to writable */
45d679d6
AJ
2529 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2530 host_start = address & qemu_host_page_mask;
2531 host_end = host_start + qemu_host_page_size;
2532
2533 prot = 0;
2534 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2535 p = page_find(addr >> TARGET_PAGE_BITS);
2536 p->flags |= PAGE_WRITE;
2537 prot |= p->flags;
2538
9fa3e853
FB
2539 /* and since the content will be modified, we must invalidate
2540 the corresponding translated code. */
45d679d6 2541 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2542#ifdef DEBUG_TB_CHECK
45d679d6 2543 tb_invalidate_check(addr);
9fa3e853 2544#endif
9fa3e853 2545 }
45d679d6
AJ
2546 mprotect((void *)g2h(host_start), qemu_host_page_size,
2547 prot & PAGE_BITS);
2548
2549 mmap_unlock();
2550 return 1;
9fa3e853 2551 }
c8a706fe 2552 mmap_unlock();
9fa3e853
FB
2553 return 0;
2554}
2555
6a00d601
FB
2556static inline void tlb_set_dirty(CPUState *env,
2557 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2558{
2559}
9fa3e853
FB
2560#endif /* defined(CONFIG_USER_ONLY) */
2561
e2eef170 2562#if !defined(CONFIG_USER_ONLY)
8da3ff18 2563
c04b2b78
PB
2564#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2565typedef struct subpage_t {
2566 target_phys_addr_t base;
f6405247
RH
2567 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2568 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2569} subpage_t;
2570
c227f099
AL
2571static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2572 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2573static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2574 ram_addr_t orig_memory,
2575 ram_addr_t region_offset);
db7b5426
BS
2576#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2577 need_subpage) \
2578 do { \
2579 if (addr > start_addr) \
2580 start_addr2 = 0; \
2581 else { \
2582 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2583 if (start_addr2 > 0) \
2584 need_subpage = 1; \
2585 } \
2586 \
49e9fba2 2587 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2588 end_addr2 = TARGET_PAGE_SIZE - 1; \
2589 else { \
2590 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2591 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2592 need_subpage = 1; \
2593 } \
2594 } while (0)
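/* Worked example (illustrative, 4K pages): registering a region with
   start_addr = 0x1000200 and orig_size = 0x400 while visiting the page
   addr = 0x1000000 gives start_addr2 = 0x200 (the region starts inside
   the page) and end_addr2 = 0x5ff (it also ends inside it); both branches
   set need_subpage = 1, so the page is backed by a subpage_t rather than
   a flat phys_offset. */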
2595
8f2498f9
MT
2596/* register physical memory.
2597 For RAM, 'size' must be a multiple of the target page size.
2598 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2599 io memory page. The address used when calling the IO function is
2600 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2601 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2602 before calculating this offset. This should not be a problem unless
2603 the low bits of start_addr and region_offset differ. */
0fd542fb 2604void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2605 ram_addr_t size,
2606 ram_addr_t phys_offset,
0fd542fb
MT
2607 ram_addr_t region_offset,
2608 bool log_dirty)
33417e70 2609{
c227f099 2610 target_phys_addr_t addr, end_addr;
92e873b9 2611 PhysPageDesc *p;
9d42037b 2612 CPUState *env;
c227f099 2613 ram_addr_t orig_size = size;
f6405247 2614 subpage_t *subpage;
33417e70 2615
0fd542fb 2616 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2617
67c4d23c
PB
2618 if (phys_offset == IO_MEM_UNASSIGNED) {
2619 region_offset = start_addr;
2620 }
8da3ff18 2621 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2622 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2623 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2624 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2625 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2626 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2627 ram_addr_t orig_memory = p->phys_offset;
2628 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2629 int need_subpage = 0;
2630
2631 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2632 need_subpage);
f6405247 2633 if (need_subpage) {
db7b5426
BS
2634 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2635 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2636 &p->phys_offset, orig_memory,
2637 p->region_offset);
db7b5426
BS
2638 } else {
2639 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2640 >> IO_MEM_SHIFT];
2641 }
8da3ff18
PB
2642 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2643 region_offset);
2644 p->region_offset = 0;
db7b5426
BS
2645 } else {
2646 p->phys_offset = phys_offset;
2647 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2648 (phys_offset & IO_MEM_ROMD))
2649 phys_offset += TARGET_PAGE_SIZE;
2650 }
2651 } else {
2652 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2653 p->phys_offset = phys_offset;
8da3ff18 2654 p->region_offset = region_offset;
db7b5426 2655 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2656 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2657 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2658 } else {
c227f099 2659 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2660 int need_subpage = 0;
2661
2662 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2663 end_addr2, need_subpage);
2664
f6405247 2665 if (need_subpage) {
db7b5426 2666 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2667 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2668 addr & TARGET_PAGE_MASK);
db7b5426 2669 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2670 phys_offset, region_offset);
2671 p->region_offset = 0;
db7b5426
BS
2672 }
2673 }
2674 }
8da3ff18 2675 region_offset += TARGET_PAGE_SIZE;
33417e70 2676 }
3b46e624 2677
9d42037b
FB
2678 /* since each CPU stores ram addresses in its TLB cache, we must
2679 reset the modified entries */
2680 /* XXX: slow ! */
2681 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2682 tlb_flush(env, 1);
2683 }
33417e70
FB
2684}
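/* Usage sketch (illustrative): how a board typically wires RAM and a
   device into the physical address space. cpu_register_physical_memory()
   is assumed to be the usual wrapper that passes region_offset = 0 and
   log_dirty = false to the function above; my_read/my_write/opaque are
   hypothetical device callbacks. */
#if 0
    ram_addr_t ram_off = qemu_ram_alloc(NULL, "board.ram", ram_size);
    cpu_register_physical_memory(0, ram_size, ram_off | IO_MEM_RAM);

    int io = cpu_register_io_memory(my_read, my_write, opaque,
                                    DEVICE_NATIVE_ENDIAN);
    cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, io);
#endif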
2685
ba863458 2686/* XXX: temporary until new memory mapping API */
c227f099 2687ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2688{
2689 PhysPageDesc *p;
2690
2691 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2692 if (!p)
2693 return IO_MEM_UNASSIGNED;
2694 return p->phys_offset;
2695}
2696
c227f099 2697void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2698{
2699 if (kvm_enabled())
2700 kvm_coalesce_mmio_region(addr, size);
2701}
2702
c227f099 2703void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2704{
2705 if (kvm_enabled())
2706 kvm_uncoalesce_mmio_region(addr, size);
2707}
2708
62a2744c
SY
2709void qemu_flush_coalesced_mmio_buffer(void)
2710{
2711 if (kvm_enabled())
2712 kvm_flush_coalesced_mmio_buffer();
2713}
2714
c902760f
MT
2715#if defined(__linux__) && !defined(TARGET_S390X)
2716
2717#include <sys/vfs.h>
2718
2719#define HUGETLBFS_MAGIC 0x958458f6
2720
2721static long gethugepagesize(const char *path)
2722{
2723 struct statfs fs;
2724 int ret;
2725
2726 do {
9742bf26 2727 ret = statfs(path, &fs);
c902760f
MT
2728 } while (ret != 0 && errno == EINTR);
2729
2730 if (ret != 0) {
9742bf26
YT
2731 perror(path);
2732 return 0;
c902760f
MT
2733 }
2734
2735 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2736 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2737
2738 return fs.f_bsize;
2739}
2740
04b16653
AW
2741static void *file_ram_alloc(RAMBlock *block,
2742 ram_addr_t memory,
2743 const char *path)
c902760f
MT
2744{
2745 char *filename;
2746 void *area;
2747 int fd;
2748#ifdef MAP_POPULATE
2749 int flags;
2750#endif
2751 unsigned long hpagesize;
2752
2753 hpagesize = gethugepagesize(path);
2754 if (!hpagesize) {
9742bf26 2755 return NULL;
c902760f
MT
2756 }
2757
2758 if (memory < hpagesize) {
2759 return NULL;
2760 }
2761
2762 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2763 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2764 return NULL;
2765 }
2766
2767 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2768 return NULL;
c902760f
MT
2769 }
2770
2771 fd = mkstemp(filename);
2772 if (fd < 0) {
9742bf26
YT
2773 perror("unable to create backing store for hugepages");
2774 free(filename);
2775 return NULL;
c902760f
MT
2776 }
2777 unlink(filename);
2778 free(filename);
2779
2780 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2781
2782 /*
2783 * ftruncate is not supported by hugetlbfs in older
2784 * hosts, so don't bother bailing out on errors.
2785 * If anything goes wrong with it under other filesystems,
2786 * mmap will fail.
2787 */
2788 if (ftruncate(fd, memory))
9742bf26 2789 perror("ftruncate");
c902760f
MT
2790
2791#ifdef MAP_POPULATE
2792 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2793 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2794 * to sidestep this quirk.
2795 */
2796 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2797 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2798#else
2799 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2800#endif
2801 if (area == MAP_FAILED) {
9742bf26
YT
2802 perror("file_ram_alloc: can't mmap RAM pages");
2803 close(fd);
2804 return (NULL);
c902760f 2805 }
04b16653 2806 block->fd = fd;
c902760f
MT
2807 return area;
2808}
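/* Worked example (illustrative): with 2M huge pages, a request of
   memory = 0x500000 is rounded up by
   (memory + hpagesize - 1) & ~(hpagesize - 1) to 0x600000, so the
   backing file always covers whole huge pages before being mmap'ed. */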
2809#endif
2810
d17b5288 2811static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2812{
2813 RAMBlock *block, *next_block;
09d7ae90 2814 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2815
2816 if (QLIST_EMPTY(&ram_list.blocks))
2817 return 0;
2818
2819 QLIST_FOREACH(block, &ram_list.blocks, next) {
2820 ram_addr_t end, next = ULONG_MAX;
2821
2822 end = block->offset + block->length;
2823
2824 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2825 if (next_block->offset >= end) {
2826 next = MIN(next, next_block->offset);
2827 }
2828 }
2829 if (next - end >= size && next - end < mingap) {
2830 offset = end;
2831 mingap = next - end;
2832 }
2833 }
2834 return offset;
2835}
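/* Worked example (illustrative): with blocks [0x0, 0x8000000) and
   [0xa000000, 0xc000000) already present, a request for 0x1000000 bytes
   sees a 0x2000000-byte gap after the first block and unbounded space
   after the second; the smaller gap that still fits wins, so the new
   block lands at offset 0x8000000. */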
2836
2837static ram_addr_t last_ram_offset(void)
d17b5288
AW
2838{
2839 RAMBlock *block;
2840 ram_addr_t last = 0;
2841
2842 QLIST_FOREACH(block, &ram_list.blocks, next)
2843 last = MAX(last, block->offset + block->length);
2844
2845 return last;
2846}
2847
84b89d78 2848ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2849 ram_addr_t size, void *host)
84b89d78
CM
2850{
2851 RAMBlock *new_block, *block;
2852
2853 size = TARGET_PAGE_ALIGN(size);
2854 new_block = qemu_mallocz(sizeof(*new_block));
2855
2856 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2857 char *id = dev->parent_bus->info->get_dev_path(dev);
2858 if (id) {
2859 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2860 qemu_free(id);
2861 }
2862 }
2863 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2864
2865 QLIST_FOREACH(block, &ram_list.blocks, next) {
2866 if (!strcmp(block->idstr, new_block->idstr)) {
2867 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2868 new_block->idstr);
2869 abort();
2870 }
2871 }
2872
6977dfe6
YT
2873 if (host) {
2874 new_block->host = host;
cd19cfa2 2875 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2876 } else {
2877 if (mem_path) {
c902760f 2878#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2879 new_block->host = file_ram_alloc(new_block, size, mem_path);
2880 if (!new_block->host) {
2881 new_block->host = qemu_vmalloc(size);
e78815a5 2882 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2883 }
c902760f 2884#else
6977dfe6
YT
2885 fprintf(stderr, "-mem-path option unsupported\n");
2886 exit(1);
c902760f 2887#endif
6977dfe6 2888 } else {
6b02494d 2889#if defined(TARGET_S390X) && defined(CONFIG_KVM)
6977dfe6
YT
2890 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2891 new_block->host = mmap((void*)0x1000000, size,
2892 PROT_EXEC|PROT_READ|PROT_WRITE,
2893 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2894#else
6977dfe6 2895 new_block->host = qemu_vmalloc(size);
6b02494d 2896#endif
e78815a5 2897 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2898 }
c902760f 2899 }
6977dfe6 2900
d17b5288 2901 new_block->offset = find_ram_offset(size);
94a6b54f
PB
2902 new_block->length = size;
2903
f471a17e 2904 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2905
f471a17e 2906 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2907 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2908 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2909 0xff, size >> TARGET_PAGE_BITS);
2910
6f0437e8
JK
2911 if (kvm_enabled())
2912 kvm_setup_guest_memory(new_block->host, size);
2913
94a6b54f
PB
2914 return new_block->offset;
2915}
e9a1ab19 2916
6977dfe6
YT
2917ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2918{
2919 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2920}
2921
c227f099 2922void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2923{
04b16653
AW
2924 RAMBlock *block;
2925
2926 QLIST_FOREACH(block, &ram_list.blocks, next) {
2927 if (addr == block->offset) {
2928 QLIST_REMOVE(block, next);
cd19cfa2
HY
2929 if (block->flags & RAM_PREALLOC_MASK) {
2930 ;
2931 } else if (mem_path) {
04b16653
AW
2932#if defined (__linux__) && !defined(TARGET_S390X)
2933 if (block->fd) {
2934 munmap(block->host, block->length);
2935 close(block->fd);
2936 } else {
2937 qemu_vfree(block->host);
2938 }
fd28aa13
JK
2939#else
2940 abort();
04b16653
AW
2941#endif
2942 } else {
2943#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2944 munmap(block->host, block->length);
2945#else
2946 qemu_vfree(block->host);
2947#endif
2948 }
2949 qemu_free(block);
2950 return;
2951 }
2952 }
2953
e9a1ab19
FB
2954}
2955
cd19cfa2
HY
2956#ifndef _WIN32
2957void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2958{
2959 RAMBlock *block;
2960 ram_addr_t offset;
2961 int flags;
2962 void *area, *vaddr;
2963
2964 QLIST_FOREACH(block, &ram_list.blocks, next) {
2965 offset = addr - block->offset;
2966 if (offset < block->length) {
2967 vaddr = block->host + offset;
2968 if (block->flags & RAM_PREALLOC_MASK) {
2969 ;
2970 } else {
2971 flags = MAP_FIXED;
2972 munmap(vaddr, length);
2973 if (mem_path) {
2974#if defined(__linux__) && !defined(TARGET_S390X)
2975 if (block->fd) {
2976#ifdef MAP_POPULATE
2977 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2978 MAP_PRIVATE;
2979#else
2980 flags |= MAP_PRIVATE;
2981#endif
2982 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2983 flags, block->fd, offset);
2984 } else {
2985 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2986 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2987 flags, -1, 0);
2988 }
fd28aa13
JK
2989#else
2990 abort();
cd19cfa2
HY
2991#endif
2992 } else {
2993#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2994 flags |= MAP_SHARED | MAP_ANONYMOUS;
2995 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2996 flags, -1, 0);
2997#else
2998 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2999 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3000 flags, -1, 0);
3001#endif
3002 }
3003 if (area != vaddr) {
3004 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3005 length, addr);
3006 exit(1);
3007 }
3008 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3009 }
3010 return;
3011 }
3012 }
3013}
3014#endif /* !_WIN32 */
3015
dc828ca1 3016/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3017 With the exception of the softmmu code in this file, this should
3018 only be used for local memory (e.g. video ram) that the device owns,
3019 and knows it isn't going to access beyond the end of the block.
3020
3021 It should not be used for general purpose DMA.
3022 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3023 */
c227f099 3024void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3025{
94a6b54f
PB
3026 RAMBlock *block;
3027
f471a17e
AW
3028 QLIST_FOREACH(block, &ram_list.blocks, next) {
3029 if (addr - block->offset < block->length) {
7d82af38
VP
3030 /* Move this entry to the start of the list. */
3031 if (block != QLIST_FIRST(&ram_list.blocks)) {
3032 QLIST_REMOVE(block, next);
3033 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3034 }
f471a17e
AW
3035 return block->host + (addr - block->offset);
3036 }
94a6b54f 3037 }
f471a17e
AW
3038
3039 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3040 abort();
3041
3042 return NULL;
dc828ca1
PB
3043}
3044
b2e0a138
MT
3045/* Return a host pointer to ram allocated with qemu_ram_alloc.
3046 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3047 */
3048void *qemu_safe_ram_ptr(ram_addr_t addr)
3049{
3050 RAMBlock *block;
3051
3052 QLIST_FOREACH(block, &ram_list.blocks, next) {
3053 if (addr - block->offset < block->length) {
3054 return block->host + (addr - block->offset);
3055 }
3056 }
3057
3058 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3059 abort();
3060
3061 return NULL;
3062}
3063
e890261f 3064int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3065{
94a6b54f
PB
3066 RAMBlock *block;
3067 uint8_t *host = ptr;
3068
f471a17e
AW
3069 QLIST_FOREACH(block, &ram_list.blocks, next) {
3070 if (host - block->host < block->length) {
e890261f
MT
3071 *ram_addr = block->offset + (host - block->host);
3072 return 0;
f471a17e 3073 }
94a6b54f 3074 }
e890261f
MT
3075 return -1;
3076}
f471a17e 3077
e890261f
MT
3078/* Some of the softmmu routines need to translate from a host pointer
3079 (typically a TLB entry) back to a ram offset. */
3080ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3081{
3082 ram_addr_t ram_addr;
f471a17e 3083
e890261f
MT
3084 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3085 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3086 abort();
3087 }
3088 return ram_addr;
5579c7f3
PB
3089}
3090
c227f099 3091static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3092{
67d3b957 3093#ifdef DEBUG_UNASSIGNED
ab3d1727 3094 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3095#endif
faed1c2a 3096#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3097 do_unassigned_access(addr, 0, 0, 0, 1);
3098#endif
3099 return 0;
3100}
3101
c227f099 3102static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3103{
3104#ifdef DEBUG_UNASSIGNED
3105 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3106#endif
faed1c2a 3107#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3108 do_unassigned_access(addr, 0, 0, 0, 2);
3109#endif
3110 return 0;
3111}
3112
c227f099 3113static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3114{
3115#ifdef DEBUG_UNASSIGNED
3116 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3117#endif
faed1c2a 3118#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3119 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 3120#endif
33417e70
FB
3121 return 0;
3122}
3123
c227f099 3124static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3125{
67d3b957 3126#ifdef DEBUG_UNASSIGNED
ab3d1727 3127 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3128#endif
faed1c2a 3129#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3130 do_unassigned_access(addr, 1, 0, 0, 1);
3131#endif
3132}
3133
c227f099 3134static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3135{
3136#ifdef DEBUG_UNASSIGNED
3137 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3138#endif
faed1c2a 3139#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3140 do_unassigned_access(addr, 1, 0, 0, 2);
3141#endif
3142}
3143
c227f099 3144static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3145{
3146#ifdef DEBUG_UNASSIGNED
3147 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3148#endif
faed1c2a 3149#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3150 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 3151#endif
33417e70
FB
3152}
3153
d60efc6b 3154static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3155 unassigned_mem_readb,
e18231a3
BS
3156 unassigned_mem_readw,
3157 unassigned_mem_readl,
33417e70
FB
3158};
3159
d60efc6b 3160static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3161 unassigned_mem_writeb,
e18231a3
BS
3162 unassigned_mem_writew,
3163 unassigned_mem_writel,
33417e70
FB
3164};
3165
c227f099 3166static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3167 uint32_t val)
9fa3e853 3168{
3a7d929e 3169 int dirty_flags;
f7c11b53 3170 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3171 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3172#if !defined(CONFIG_USER_ONLY)
3a7d929e 3173 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3174 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3175#endif
3a7d929e 3176 }
5579c7f3 3177 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3178 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3179 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3180 /* we remove the notdirty callback only if the code has been
3181 flushed */
3182 if (dirty_flags == 0xff)
2e70f6ef 3183 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3184}
3185
c227f099 3186static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3187 uint32_t val)
9fa3e853 3188{
3a7d929e 3189 int dirty_flags;
f7c11b53 3190 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3191 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3192#if !defined(CONFIG_USER_ONLY)
3a7d929e 3193 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3194 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3195#endif
3a7d929e 3196 }
5579c7f3 3197 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3198 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3199 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3200 /* we remove the notdirty callback only if the code has been
3201 flushed */
3202 if (dirty_flags == 0xff)
2e70f6ef 3203 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3204}
3205
c227f099 3206static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3207 uint32_t val)
9fa3e853 3208{
3a7d929e 3209 int dirty_flags;
f7c11b53 3210 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3211 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3212#if !defined(CONFIG_USER_ONLY)
3a7d929e 3213 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3214 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3215#endif
3a7d929e 3216 }
5579c7f3 3217 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3218 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3219 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3220 /* we remove the notdirty callback only if the code has been
3221 flushed */
3222 if (dirty_flags == 0xff)
2e70f6ef 3223 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3224}
3225
d60efc6b 3226static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3227 NULL, /* never used */
3228 NULL, /* never used */
3229 NULL, /* never used */
3230};
3231
d60efc6b 3232static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3233 notdirty_mem_writeb,
3234 notdirty_mem_writew,
3235 notdirty_mem_writel,
3236};
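/* Illustrative flow of the not-dirty write path: tlb_set_page() earlier
   in this file tags a clean RAM page's addr_write with TLB_NOTDIRTY, so
   the first store is routed into the notdirty_mem_write* handlers above;
   they invalidate any translated code on the page when CODE_DIRTY_FLAG is
   clear, perform the store, raise the dirty flags, and once dirty_flags
   == 0xff call tlb_set_dirty() so later stores hit RAM directly. */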
3237
0f459d16 3238/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3239static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3240{
3241 CPUState *env = cpu_single_env;
06d55cc1
AL
3242 target_ulong pc, cs_base;
3243 TranslationBlock *tb;
0f459d16 3244 target_ulong vaddr;
a1d1bb31 3245 CPUWatchpoint *wp;
06d55cc1 3246 int cpu_flags;
0f459d16 3247
06d55cc1
AL
3248 if (env->watchpoint_hit) {
3249 /* We re-entered the check after replacing the TB. Now raise
3250 * the debug interrupt so that it will trigger after the
3251 * current instruction. */
3252 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3253 return;
3254 }
2e70f6ef 3255 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3256 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3257 if ((vaddr == (wp->vaddr & len_mask) ||
3258 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3259 wp->flags |= BP_WATCHPOINT_HIT;
3260 if (!env->watchpoint_hit) {
3261 env->watchpoint_hit = wp;
3262 tb = tb_find_pc(env->mem_io_pc);
3263 if (!tb) {
3264 cpu_abort(env, "check_watchpoint: could not find TB for "
3265 "pc=%p", (void *)env->mem_io_pc);
3266 }
3267 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3268 tb_phys_invalidate(tb, -1);
3269 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3270 env->exception_index = EXCP_DEBUG;
3271 } else {
3272 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3273 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3274 }
3275 cpu_resume_from_signal(env, NULL);
06d55cc1 3276 }
6e140f28
AL
3277 } else {
3278 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3279 }
3280 }
3281}
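/* Illustrative flow: tlb_set_page() maps pages containing watchpoints
   with TLB_MMIO, funneling every access through the watch_mem_* handlers
   below; each handler calls check_watchpoint() first and then falls
   through to the normal ld*_phys/st*_phys accessors. */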
3282
6658ffb8
PB
3283/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3284 so these check for a hit then pass through to the normal out-of-line
3285 phys routines. */
c227f099 3286static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3287{
b4051334 3288 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3289 return ldub_phys(addr);
3290}
3291
c227f099 3292static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3293{
b4051334 3294 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3295 return lduw_phys(addr);
3296}
3297
c227f099 3298static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3299{
b4051334 3300 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3301 return ldl_phys(addr);
3302}
3303
c227f099 3304static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3305 uint32_t val)
3306{
b4051334 3307 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3308 stb_phys(addr, val);
3309}
3310
c227f099 3311static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3312 uint32_t val)
3313{
b4051334 3314 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3315 stw_phys(addr, val);
3316}
3317
c227f099 3318static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3319 uint32_t val)
3320{
b4051334 3321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3322 stl_phys(addr, val);
3323}
3324
d60efc6b 3325static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3326 watch_mem_readb,
3327 watch_mem_readw,
3328 watch_mem_readl,
3329};
3330
d60efc6b 3331static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3332 watch_mem_writeb,
3333 watch_mem_writew,
3334 watch_mem_writel,
3335};
6658ffb8 3336
f6405247
RH
3337static inline uint32_t subpage_readlen (subpage_t *mmio,
3338 target_phys_addr_t addr,
3339 unsigned int len)
db7b5426 3340{
f6405247 3341 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3342#if defined(DEBUG_SUBPAGE)
3343 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3344 mmio, len, addr, idx);
3345#endif
db7b5426 3346
f6405247
RH
3347 addr += mmio->region_offset[idx];
3348 idx = mmio->sub_io_index[idx];
3349 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3350}
3351
c227f099 3352static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3353 uint32_t value, unsigned int len)
db7b5426 3354{
f6405247 3355 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3356#if defined(DEBUG_SUBPAGE)
f6405247
RH
3357 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3358 __func__, mmio, len, addr, idx, value);
db7b5426 3359#endif
f6405247
RH
3360
3361 addr += mmio->region_offset[idx];
3362 idx = mmio->sub_io_index[idx];
3363 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3364}
3365
c227f099 3366static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3367{
db7b5426
BS
3368 return subpage_readlen(opaque, addr, 0);
3369}
3370
c227f099 3371static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3372 uint32_t value)
3373{
db7b5426
BS
3374 subpage_writelen(opaque, addr, value, 0);
3375}
3376
c227f099 3377static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3378{
db7b5426
BS
3379 return subpage_readlen(opaque, addr, 1);
3380}
3381
c227f099 3382static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3383 uint32_t value)
3384{
db7b5426
BS
3385 subpage_writelen(opaque, addr, value, 1);
3386}
3387
c227f099 3388static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3389{
db7b5426
BS
3390 return subpage_readlen(opaque, addr, 2);
3391}
3392
f6405247
RH
3393static void subpage_writel (void *opaque, target_phys_addr_t addr,
3394 uint32_t value)
db7b5426 3395{
db7b5426
BS
3396 subpage_writelen(opaque, addr, value, 2);
3397}
3398
d60efc6b 3399static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3400 &subpage_readb,
3401 &subpage_readw,
3402 &subpage_readl,
3403};
3404
d60efc6b 3405static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3406 &subpage_writeb,
3407 &subpage_writew,
3408 &subpage_writel,
3409};
3410
c227f099
AL
3411static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3412 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3413{
3414 int idx, eidx;
3415
3416 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3417 return -1;
3418 idx = SUBPAGE_IDX(start);
3419 eidx = SUBPAGE_IDX(end);
3420#if defined(DEBUG_SUBPAGE)
0bf9e31a 3421 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3422 mmio, start, end, idx, eidx, memory);
3423#endif
95c318f5
GN
3424 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3425 memory = IO_MEM_UNASSIGNED;
f6405247 3426 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3427 for (; idx <= eidx; idx++) {
f6405247
RH
3428 mmio->sub_io_index[idx] = memory;
3429 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3430 }
3431
3432 return 0;
3433}
3434
f6405247
RH
3435static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3436 ram_addr_t orig_memory,
3437 ram_addr_t region_offset)
db7b5426 3438{
c227f099 3439 subpage_t *mmio;
db7b5426
BS
3440 int subpage_memory;
3441
c227f099 3442 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3443
3444 mmio->base = base;
2507c12a
AG
3445 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3446 DEVICE_NATIVE_ENDIAN);
db7b5426 3447#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3448 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3449 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3450#endif
1eec614b 3451 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3452 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3453
3454 return mmio;
3455}
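/* Worked example (illustrative, my_io being a value returned by
   cpu_register_io_memory): after subpage_register(mmio, 0x200, 0x5ff,
   my_io, 0), a byte read at base + 0x204 lands in subpage_readb(); it
   looks up sub_io_index[0x204] (my_io >> IO_MEM_SHIFT, as stored by
   subpage_register), adds the per-entry region_offset to the address and
   forwards to the matching io_mem_read handler. */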
3456
88715657
AL
3457static int get_free_io_mem_idx(void)
3458{
3459 int i;
3460
3461 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3462 if (!io_mem_used[i]) {
3463 io_mem_used[i] = 1;
3464 return i;
3465 }
c6703b47 3466 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3467 return -1;
3468}

/*
 * Usually, devices operate in little-endian mode.  There are devices out
 * there that operate in big-endian mode too.  Each device gets
 * byte-swapped mmio if it is plugged onto a CPU that uses the other
 * endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2).  Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified.  If it is zero, a new io zone is allocated.  The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}

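/*
 * Illustrative sketch (not part of the build): a hypothetical device
 * registering its MMIO callbacks.  All my_* names are made up for the
 * example; a real device passes the returned io index to
 * cpu_register_physical_memory().
 */
#if 0
static CPUReadMemoryFunc * const my_dev_read[] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc * const my_dev_write[] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(void *dev_state, target_phys_addr_t base)
{
    /* DEVICE_LITTLE_ENDIAN asks the core to byte-swap accesses on
       big-endian targets, via swapendian_init() above. */
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, dev_state,
                                    DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, 0x1000, io);
}
#endif
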
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

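/*
 * Illustrative sketch (not part of the build): copying data into and out
 * of guest physical memory with the read/write wrappers around
 * cpu_physical_memory_rw().  The address 0x1000 is arbitrary.
 */
#if 0
static void phys_rw_example(void)
{
    uint8_t out[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t in[4];

    cpu_physical_memory_write(0x1000, out, sizeof(out)); /* is_write = 1 */
    cpu_physical_memory_read(0x1000, in, sizeof(in));    /* is_write = 0 */
}
#endif
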
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

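/*
 * Illustrative sketch (not part of the build): loading a firmware blob
 * into a ROM region, which a plain cpu_physical_memory_write() would
 * silently skip.  The base address and blob are hypothetical.
 */
#if 0
static void load_firmware_example(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}
#endif
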
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

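/*
 * Illustrative sketch (not part of the build): a DMA user registering a
 * callback so it can retry cpu_physical_memory_map() once the bounce
 * buffer is released.  MyDMAState, my_dma_run() and the map_client
 * field are hypothetical.
 */
#if 0
static void my_map_retry(void *opaque)
{
    MyDMAState *s = opaque;
    /* The mapping that failed earlier is now likely to succeed. */
    my_dma_run(s);
}

static void my_dma_defer(MyDMAState *s)
{
    s->map_client = cpu_register_map_client(s, my_map_retry);
}
#endif
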
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

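/*
 * Illustrative sketch (not part of the build): zero-copy access to guest
 * memory via map/unmap.  If less is mapped than requested (e.g. the
 * range hits MMIO and only the single bounce buffer is available), a
 * real caller would loop or fall back to cpu_physical_memory_rw().
 */
#if 0
static void map_example(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (buf) {
        memset(buf, 0, plen);  /* operate directly on guest RAM */
        cpu_physical_memory_unmap(buf, plen, 1, plen);
    }
}
#endif
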
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned.  The ram page is not marked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

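/*
 * Illustrative sketch (not part of the build): the ld*_phys/st*_phys
 * helpers above provide fixed-size access to guest physical memory;
 * addr must be naturally aligned for the word-sized variants.
 */
#if 0
static void ld_st_example(target_phys_addr_t addr)
{
    uint32_t v = ldl_phys(addr);   /* 32 bit read */
    stl_phys(addr, v + 1);         /* 32 bit write, dirties the page */
    stl_phys_notdirty(addr, v);    /* write without dirty tracking */
}
#endif
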
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

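/*
 * Illustrative sketch (not part of the build): how a debugger stub such
 * as the gdbstub reads guest virtual memory.  The buffer size is an
 * arbitrary example.
 */
#if 0
static int debug_read_example(CPUState *env, target_ulong addr)
{
    uint8_t insn[4];

    /* Translates addr through the MMU page by page; fails if any page
       in the range has no physical mapping. */
    return cpu_memory_rw_debug(env, addr, insn, sizeof(insn), 0);
}
#endif
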
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

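/*
 * Illustrative sketch (not part of the build): the monitor's "info jit"
 * command passes its own output callback here; any fprintf-compatible
 * function should work.
 */
#if 0
static void dump_example(void)
{
    dump_exec_info(stderr, (fprintf_function)fprintf);
}
#endif
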
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif