/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

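/* How the two-level lookup works: the page index (address >> TARGET_PAGE_BITS)
   is split into an L1 index (top L1_BITS) selecting an entry of
   l1_map/l1_phys_map, and an L2 index (low L2_BITS) selecting a
   PageDesc/PhysPageDesc inside the array that the L1 entry points to.
   Worked example, assuming a 4 KB target page (TARGET_PAGE_BITS = 12) and
   the default L2_BITS = 10: address 0x12345678 has page index 0x12345,
   L1 index 0x48 (0x12345 >> 10) and L2 index 0x345 (0x12345 & 0x3ff). */
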
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

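/* Callers index these helpers by target page number, i.e.
   page_find(addr >> TARGET_PAGE_BITS), and the helpers walk the two-level
   l1_map described above: page_find_alloc() creates the L2 array on demand,
   while page_find() only looks up and may return NULL. */
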
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

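/* Passing tb_size == 0 lets code_gen_alloc() pick the default above:
   DEFAULT_CODE_GEN_BUFFER_SIZE (32 MB) for user-mode emulation, or
   ram_size / 4 for system emulation. */
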
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

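/* The lists walked here store a small tag in the two low bits of each
   TranslationBlock pointer: 0 or 1 names which of the TB's (up to) two
   physical pages a page_next[] link belongs to, and 2 marks the head of
   the circular jump list (the TB itself in jmp_first).  That is why the
   code repeatedly computes n1 = (long)tb1 & 3 and clears the low bits
   before dereferencing the pointer. */
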
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

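/* The code_bitmap built above holds one bit per byte of the target page,
   set for bytes covered by at least one TB.  Once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults, tb_invalidate_phys_page_fast()
   consults this bitmap so that guest writes which do not touch translated
   code can skip the expensive tb_invalidate_phys_page_range() path. */
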
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

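/* The binary search above relies on tbs[] being filled in allocation order:
   tb_alloc() hands out consecutive entries and tb_gen_code() advances
   code_gen_ptr monotonically, so tc_ptr values stay sorted by construction
   until the next tb_flush(). */
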
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

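/* Typical use, as a sketch (BP_MEM_WRITE and BP_GDB are flag values defined
 * with the CPU debug support headers; error handling is illustrative only):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0)
 *         report the failure to the debugger;
 *
 * len must be a power of two (1, 2, 4 or 8) and addr aligned to it,
 * otherwise -EINVAL is returned as checked above. */
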
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

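/* With icount enabled, setting icount_decr.u16.high to 0xffff makes the
   32-bit icount counter checked at the start of each generated TB go
   negative, so the running CPU drops out of translated code and notices
   the interrupt without having to unlink chained TBs. */
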
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
                                         CPUPhysMemoryClient *client)
{
    PhysPageDesc *pd;
    int l1, l2;

    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        pd = phys_map[l1];
        if (!pd) {
            continue;
        }
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            client->set_memory(client, pd[l2].region_offset,
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    void **phys_map = (void **)l1_phys_map;
    int l1;
    if (!l1_phys_map) {
        return;
    }
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        if (phys_map[l1]) {
            phys_page_for_each_in_l1_map(phys_map[l1], client);
        }
    }
#else
    if (!l1_phys_map) {
        return;
    }
    phys_page_for_each_in_l1_map(l1_phys_map, client);
#endif
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

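/* A sketch of how a client hooks into this notifier list.  The
 * CPUPhysMemoryClient struct with its set_memory/sync_dirty_bitmap/
 * migration_log callbacks is declared in the common CPU headers; the
 * function names in this example are illustrative only:
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory        = my_set_memory,
 *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *         .migration_log     = my_migration_log,
 *     };
 *     cpu_register_phys_memory_client(&my_client);
 *
 * Registration immediately replays every mapped physical page through
 * phys_page_for_each(), so the client starts with a complete view. */
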
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

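/* For example, cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */
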
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
1887 env->current_tb = NULL;
61382a50
FB
1888
1889 addr &= TARGET_PAGE_MASK;
1890 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1891 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1892 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1893
5c751e99 1894 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1895}
1896
9fa3e853
FB
1897/* update the TLBs so that writes to code in the virtual page 'addr'
1898 can be detected */
c227f099 1899static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1900{
5fafdf24 1901 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1902 ram_addr + TARGET_PAGE_SIZE,
1903 CODE_DIRTY_FLAG);
9fa3e853
FB
1904}
1905
9fa3e853 1906/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1907 tested for self modifying code */
c227f099 1908static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1909 target_ulong vaddr)
9fa3e853 1910{
3a7d929e 1911 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1912}
1913
5fafdf24 1914static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1915 unsigned long start, unsigned long length)
1916{
1917 unsigned long addr;
84b7b8e7
FB
1918 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1919 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1920 if ((addr - start) < length) {
0f459d16 1921 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1922 }
1923 }
1924}
1925
5579c7f3 1926/* Note: start and end must be within the same ram block. */
c227f099 1927void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1928 int dirty_flags)
1ccde1cb
FB
1929{
1930 CPUState *env;
4f2ac237 1931 unsigned long length, start1;
0a962c02
FB
1932 int i, mask, len;
1933 uint8_t *p;
1ccde1cb
FB
1934
1935 start &= TARGET_PAGE_MASK;
1936 end = TARGET_PAGE_ALIGN(end);
1937
1938 length = end - start;
1939 if (length == 0)
1940 return;
0a962c02 1941 len = length >> TARGET_PAGE_BITS;
f23db169
FB
1942 mask = ~dirty_flags;
1943 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1944 for(i = 0; i < len; i++)
1945 p[i] &= mask;
1946
1ccde1cb
FB
1947 /* we modify the TLB cache so that the dirty bit will be set again
1948 when accessing the range */
5579c7f3
PB
1949 start1 = (unsigned long)qemu_get_ram_ptr(start);
1950 /* Check that we don't span multiple blocks - this breaks the
1951 address comparisons below. */
1952 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1953 != (end - 1) - start) {
1954 abort();
1955 }
1956
6a00d601 1957 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
1958 int mmu_idx;
1959 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1960 for(i = 0; i < CPU_TLB_SIZE; i++)
1961 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1962 start1, length);
1963 }
6a00d601 1964 }
1ccde1cb
FB
1965}
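
/* Illustrative sketch (not part of exec.c): how a display device typically
   consumes the dirty bits that the code above maintains.  The fb_base and
   fb_size arguments are hypothetical; VGA_DIRTY_FLAG and the
   cpu_physical_memory_get_dirty() helper live outside this file (cpu-all.h). */
static int example_count_dirty_fb_pages(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;
    int dirty = 0;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            dirty++;            /* this page was written since the last scan */
        }
    }
    /* acknowledge the whole range (must stay within one ram block) */
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size, VGA_DIRTY_FLAG);
    return dirty;
}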
1966
74576198
AL
1967int cpu_physical_memory_set_dirty_tracking(int enable)
1968{
f6f3fbca 1969 int ret = 0;
74576198 1970 in_migration = enable;
f6f3fbca
MT
1971 ret = cpu_notify_migration_log(!!enable);
1972 return ret;
74576198
AL
1973}
1974
1975int cpu_physical_memory_get_dirty_tracking(void)
1976{
1977 return in_migration;
1978}
1979
c227f099
AL
1980int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1981 target_phys_addr_t end_addr)
2bec46dc 1982{
7b8f3b78 1983 int ret;
151f7749 1984
f6f3fbca 1985 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 1986 return ret;
2bec46dc
AL
1987}
1988
3a7d929e
FB
1989static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1990{
c227f099 1991 ram_addr_t ram_addr;
5579c7f3 1992 void *p;
3a7d929e 1993
84b7b8e7 1994 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
1995 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1996 + tlb_entry->addend);
1997 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 1998 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1999 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2000 }
2001 }
2002}
2003
2004/* update the TLB according to the current state of the dirty bits */
2005void cpu_tlb_update_dirty(CPUState *env)
2006{
2007 int i;
cfde4bd9
IY
2008 int mmu_idx;
2009 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2010 for(i = 0; i < CPU_TLB_SIZE; i++)
2011 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2012 }
3a7d929e
FB
2013}
2014
0f459d16 2015static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2016{
0f459d16
PB
2017 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2018 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2019}
2020
0f459d16
PB
2021/* update the TLB corresponding to virtual page vaddr
2022 so that it is no longer dirty */
2023static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2024{
1ccde1cb 2025 int i;
cfde4bd9 2026 int mmu_idx;
1ccde1cb 2027
0f459d16 2028 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2029 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2030 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2031 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2032}
2033
59817ccb
FB
2034/* add a new TLB entry. At most one entry for a given virtual address
2035 is permitted. Return 0 if OK or 2 if the page could not be mapped
2036 (can only happen in non SOFTMMU mode for I/O pages or pages
2037 conflicting with the host address space). */
5fafdf24 2038int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
c227f099 2039 target_phys_addr_t paddr, int prot,
6ebbf390 2040 int mmu_idx, int is_softmmu)
9fa3e853 2041{
92e873b9 2042 PhysPageDesc *p;
4f2ac237 2043 unsigned long pd;
9fa3e853 2044 unsigned int index;
4f2ac237 2045 target_ulong address;
0f459d16 2046 target_ulong code_address;
c227f099 2047 target_phys_addr_t addend;
9fa3e853 2048 int ret;
84b7b8e7 2049 CPUTLBEntry *te;
a1d1bb31 2050 CPUWatchpoint *wp;
c227f099 2051 target_phys_addr_t iotlb;
9fa3e853 2052
92e873b9 2053 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2054 if (!p) {
2055 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2056 } else {
2057 pd = p->phys_offset;
9fa3e853
FB
2058 }
2059#if defined(DEBUG_TLB)
6ebbf390
JM
2060 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2061 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2062#endif
2063
2064 ret = 0;
0f459d16
PB
2065 address = vaddr;
2066 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2067 /* IO memory case (romd handled later) */
2068 address |= TLB_MMIO;
2069 }
5579c7f3 2070 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2071 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2072 /* Normal RAM. */
2073 iotlb = pd & TARGET_PAGE_MASK;
2074 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2075 iotlb |= IO_MEM_NOTDIRTY;
2076 else
2077 iotlb |= IO_MEM_ROM;
2078 } else {
ccbb4d44 2079 /* IO handlers are currently passed a physical address.
0f459d16
PB
2080 It would be nice to pass an offset from the base address
2081 of that region. This would avoid having to special case RAM,
2082 and avoid full address decoding in every device.
2083 We can't use the high bits of pd for this because
2084 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2085 iotlb = (pd & ~TARGET_PAGE_MASK);
2086 if (p) {
8da3ff18
PB
2087 iotlb += p->region_offset;
2088 } else {
2089 iotlb += paddr;
2090 }
0f459d16
PB
2091 }
2092
2093 code_address = address;
2094 /* Make accesses to pages with watchpoints go via the
2095 watchpoint trap routines. */
72cf2d4f 2096 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2097 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2098 iotlb = io_mem_watch + paddr;
2099 /* TODO: The memory case can be optimized by not trapping
2100 reads of pages with a write breakpoint. */
2101 address |= TLB_MMIO;
6658ffb8 2102 }
0f459d16 2103 }
d79acba4 2104
0f459d16
PB
2105 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2106 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2107 te = &env->tlb_table[mmu_idx][index];
2108 te->addend = addend - vaddr;
2109 if (prot & PAGE_READ) {
2110 te->addr_read = address;
2111 } else {
2112 te->addr_read = -1;
2113 }
5c751e99 2114
0f459d16
PB
2115 if (prot & PAGE_EXEC) {
2116 te->addr_code = code_address;
2117 } else {
2118 te->addr_code = -1;
2119 }
2120 if (prot & PAGE_WRITE) {
2121 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2122 (pd & IO_MEM_ROMD)) {
2123 /* Write access calls the I/O callback. */
2124 te->addr_write = address | TLB_MMIO;
2125 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2126 !cpu_physical_memory_is_dirty(pd)) {
2127 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2128 } else {
0f459d16 2129 te->addr_write = address;
9fa3e853 2130 }
0f459d16
PB
2131 } else {
2132 te->addr_write = -1;
9fa3e853 2133 }
9fa3e853
FB
2134 return ret;
2135}
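
/* Illustrative sketch (not part of exec.c): the general shape of a target's
   MMU fault handler feeding tlb_set_page_exec().  A real target derives paddr
   and prot from its page tables; an identity mapping is assumed here purely
   for illustration, and is_write is ignored because everything is mapped
   writable. */
static int example_handle_mmu_fault(CPUState *env, target_ulong address,
                                    int is_write, int mmu_idx, int is_softmmu)
{
    target_ulong vaddr = address & TARGET_PAGE_MASK;
    target_phys_addr_t paddr = vaddr;                 /* identity mapping */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
}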
2136
0124311e
FB
2137#else
2138
ee8b7021 2139void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2140{
2141}
2142
2e12669a 2143void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2144{
2145}
2146
edf8e2af
MW
2147/*
2148 * Walks guest process memory "regions" one by one
2149 * and calls callback function 'fn' for each region.
2150 */
2151int walk_memory_regions(void *priv,
2152 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
33417e70 2153{
9fa3e853 2154 unsigned long start, end;
edf8e2af 2155 PageDesc *p = NULL;
9fa3e853 2156 int i, j, prot, prot1;
edf8e2af 2157 int rc = 0;
33417e70 2158
edf8e2af 2159 start = end = -1;
9fa3e853 2160 prot = 0;
edf8e2af
MW
2161
2162 for (i = 0; i <= L1_SIZE; i++) {
2163 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2164 for (j = 0; j < L2_SIZE; j++) {
2165 prot1 = (p == NULL) ? 0 : p[j].flags;
2166 /*
2167 * "region" is one contiguous chunk of memory
2168 * that has the same protection flags set.
2169 */
9fa3e853
FB
2170 if (prot1 != prot) {
2171 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2172 if (start != -1) {
edf8e2af
MW
2173 rc = (*fn)(priv, start, end, prot);
2174 /* callback can stop iteration by returning != 0 */
2175 if (rc != 0)
2176 return (rc);
9fa3e853
FB
2177 }
2178 if (prot1 != 0)
2179 start = end;
2180 else
2181 start = -1;
2182 prot = prot1;
2183 }
edf8e2af 2184 if (p == NULL)
9fa3e853
FB
2185 break;
2186 }
33417e70 2187 }
edf8e2af
MW
2188 return (rc);
2189}
2190
2191static int dump_region(void *priv, unsigned long start,
2192 unsigned long end, unsigned long prot)
2193{
2194 FILE *f = (FILE *)priv;
2195
2196 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2197 start, end, end - start,
2198 ((prot & PAGE_READ) ? 'r' : '-'),
2199 ((prot & PAGE_WRITE) ? 'w' : '-'),
2200 ((prot & PAGE_EXEC) ? 'x' : '-'));
2201
2202 return (0);
2203}
2204
2205/* dump memory mappings */
2206void page_dump(FILE *f)
2207{
2208 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2209 "start", "end", "size", "prot");
2210 walk_memory_regions(f, dump_region);
33417e70
FB
2211}
2212
53a5960a 2213int page_get_flags(target_ulong address)
33417e70 2214{
9fa3e853
FB
2215 PageDesc *p;
2216
2217 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2218 if (!p)
9fa3e853
FB
2219 return 0;
2220 return p->flags;
2221}
2222
2223/* modify the flags of a page and invalidate the code if
ccbb4d44 2224 necessary. The flag PAGE_WRITE_ORG is set automatically
9fa3e853 2225 depending on PAGE_WRITE */
53a5960a 2226void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2227{
2228 PageDesc *p;
53a5960a 2229 target_ulong addr;
9fa3e853 2230
c8a706fe 2231 /* mmap_lock should already be held. */
9fa3e853
FB
2232 start = start & TARGET_PAGE_MASK;
2233 end = TARGET_PAGE_ALIGN(end);
2234 if (flags & PAGE_WRITE)
2235 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2236 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2237 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2238 /* We may be called for host regions that are outside guest
2239 address space. */
2240 if (!p)
2241 return;
9fa3e853
FB
2242 /* if the write protection is set, then we invalidate the code
2243 inside */
5fafdf24 2244 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2245 (flags & PAGE_WRITE) &&
2246 p->first_tb) {
d720b93d 2247 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2248 }
2249 p->flags = flags;
2250 }
33417e70
FB
2251}
2252
3d97b40b
TS
2253int page_check_range(target_ulong start, target_ulong len, int flags)
2254{
2255 PageDesc *p;
2256 target_ulong end;
2257 target_ulong addr;
2258
55f280c9
AZ
2259 if (start + len < start)
2260 /* we've wrapped around */
2261 return -1;
2262
3d97b40b
TS
2263 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2264 start = start & TARGET_PAGE_MASK;
2265
3d97b40b
TS
2266 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2267 p = page_find(addr >> TARGET_PAGE_BITS);
2268 if( !p )
2269 return -1;
2270 if( !(p->flags & PAGE_VALID) )
2271 return -1;
2272
dae3270c 2273 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2274 return -1;
dae3270c
FB
2275 if (flags & PAGE_WRITE) {
2276 if (!(p->flags & PAGE_WRITE_ORG))
2277 return -1;
2278 /* unprotect the page if it was put read-only because it
2279 contains translated code */
2280 if (!(p->flags & PAGE_WRITE)) {
2281 if (!page_unprotect(addr, 0, NULL))
2282 return -1;
2283 }
2284 return 0;
2285 }
3d97b40b
TS
2286 }
2287 return 0;
2288}
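
/* Illustrative sketch (not part of exec.c): the user-mode emulator validates
   guest buffers with page_check_range() before touching them, along the lines
   of linux-user's access_ok() helper.  This simplified wrapper is
   hypothetical. */
static int example_guest_range_ok(target_ulong addr, target_ulong len,
                                  int writable)
{
    int flags = writable ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    return page_check_range(addr, len, flags) == 0;
}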
2289
9fa3e853 2290/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2291 page. Return TRUE if the fault was successfully handled. */
53a5960a 2292int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2293{
2294 unsigned int page_index, prot, pindex;
2295 PageDesc *p, *p1;
53a5960a 2296 target_ulong host_start, host_end, addr;
9fa3e853 2297
c8a706fe
PB
2298 /* Technically this isn't safe inside a signal handler. However we
2299 know this only ever happens in a synchronous SEGV handler, so in
2300 practice it seems to be ok. */
2301 mmap_lock();
2302
83fb7adf 2303 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2304 page_index = host_start >> TARGET_PAGE_BITS;
2305 p1 = page_find(page_index);
c8a706fe
PB
2306 if (!p1) {
2307 mmap_unlock();
9fa3e853 2308 return 0;
c8a706fe 2309 }
83fb7adf 2310 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2311 p = p1;
2312 prot = 0;
2313 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2314 prot |= p->flags;
2315 p++;
2316 }
2317 /* if the page was really writable, then we change its
2318 protection back to writable */
2319 if (prot & PAGE_WRITE_ORG) {
2320 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2321 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2322 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2323 (prot & PAGE_BITS) | PAGE_WRITE);
2324 p1[pindex].flags |= PAGE_WRITE;
2325 /* and since the content will be modified, we must invalidate
2326 the corresponding translated code. */
d720b93d 2327 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2328#ifdef DEBUG_TB_CHECK
2329 tb_invalidate_check(address);
2330#endif
c8a706fe 2331 mmap_unlock();
9fa3e853
FB
2332 return 1;
2333 }
2334 }
c8a706fe 2335 mmap_unlock();
9fa3e853
FB
2336 return 0;
2337}
2338
6a00d601
FB
2339static inline void tlb_set_dirty(CPUState *env,
2340 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2341{
2342}
9fa3e853
FB
2343#endif /* defined(CONFIG_USER_ONLY) */
2344
e2eef170 2345#if !defined(CONFIG_USER_ONLY)
8da3ff18 2346
c04b2b78
PB
2347#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2348typedef struct subpage_t {
2349 target_phys_addr_t base;
2350 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2351 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2352 void *opaque[TARGET_PAGE_SIZE][2][4];
2353 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2354} subpage_t;
2355
c227f099
AL
2356static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2357 ram_addr_t memory, ram_addr_t region_offset);
2358static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2359 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2360#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2361 need_subpage) \
2362 do { \
2363 if (addr > start_addr) \
2364 start_addr2 = 0; \
2365 else { \
2366 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2367 if (start_addr2 > 0) \
2368 need_subpage = 1; \
2369 } \
2370 \
49e9fba2 2371 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2372 end_addr2 = TARGET_PAGE_SIZE - 1; \
2373 else { \
2374 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2375 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2376 need_subpage = 1; \
2377 } \
2378 } while (0)
2379
8f2498f9
MT
2380/* register physical memory.
2381 For RAM, 'size' must be a multiple of the target page size.
2382 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2383 io memory page. The address used when calling the IO function is
2384 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2385 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2386 before calculating this offset. This should not be a problem unless
2387 the low bits of start_addr and region_offset differ. */
c227f099
AL
2388void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2389 ram_addr_t size,
2390 ram_addr_t phys_offset,
2391 ram_addr_t region_offset)
33417e70 2392{
c227f099 2393 target_phys_addr_t addr, end_addr;
92e873b9 2394 PhysPageDesc *p;
9d42037b 2395 CPUState *env;
c227f099 2396 ram_addr_t orig_size = size;
db7b5426 2397 void *subpage;
33417e70 2398
f6f3fbca
MT
2399 cpu_notify_set_memory(start_addr, size, phys_offset);
2400
67c4d23c
PB
2401 if (phys_offset == IO_MEM_UNASSIGNED) {
2402 region_offset = start_addr;
2403 }
8da3ff18 2404 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2405 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2406 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2407 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2408 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2409 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2410 ram_addr_t orig_memory = p->phys_offset;
2411 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2412 int need_subpage = 0;
2413
2414 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2415 need_subpage);
4254fab8 2416 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2417 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2418 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2419 &p->phys_offset, orig_memory,
2420 p->region_offset);
db7b5426
BS
2421 } else {
2422 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2423 >> IO_MEM_SHIFT];
2424 }
8da3ff18
PB
2425 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2426 region_offset);
2427 p->region_offset = 0;
db7b5426
BS
2428 } else {
2429 p->phys_offset = phys_offset;
2430 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2431 (phys_offset & IO_MEM_ROMD))
2432 phys_offset += TARGET_PAGE_SIZE;
2433 }
2434 } else {
2435 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2436 p->phys_offset = phys_offset;
8da3ff18 2437 p->region_offset = region_offset;
db7b5426 2438 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2439 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2440 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2441 } else {
c227f099 2442 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2443 int need_subpage = 0;
2444
2445 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2446 end_addr2, need_subpage);
2447
4254fab8 2448 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2449 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2450 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2451 addr & TARGET_PAGE_MASK);
db7b5426 2452 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2453 phys_offset, region_offset);
2454 p->region_offset = 0;
db7b5426
BS
2455 }
2456 }
2457 }
8da3ff18 2458 region_offset += TARGET_PAGE_SIZE;
33417e70 2459 }
3b46e624 2460
9d42037b
FB
2461 /* since each CPU stores ram addresses in its TLB cache, we must
2462 reset the modified entries */
2463 /* XXX: slow ! */
2464 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2465 tlb_flush(env, 1);
2466 }
33417e70
FB
2467}
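
/* Illustrative sketch (not part of exec.c): how board code typically maps
   guest RAM through this interface, using the cpu_register_physical_memory()
   wrapper from cpu-common.h (which supplies the region_offset argument for
   the function above).  The guest address and size are hypothetical. */
static void example_board_map_ram(void)
{
    ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);

    /* 8 MB of RAM starting at guest physical address 0 */
    cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}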
2468
ba863458 2469/* XXX: temporary until new memory mapping API */
c227f099 2470ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2471{
2472 PhysPageDesc *p;
2473
2474 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2475 if (!p)
2476 return IO_MEM_UNASSIGNED;
2477 return p->phys_offset;
2478}
2479
c227f099 2480void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2481{
2482 if (kvm_enabled())
2483 kvm_coalesce_mmio_region(addr, size);
2484}
2485
c227f099 2486void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2487{
2488 if (kvm_enabled())
2489 kvm_uncoalesce_mmio_region(addr, size);
2490}
2491
62a2744c
SY
2492void qemu_flush_coalesced_mmio_buffer(void)
2493{
2494 if (kvm_enabled())
2495 kvm_flush_coalesced_mmio_buffer();
2496}
2497
c902760f
MT
2498#if defined(__linux__) && !defined(TARGET_S390X)
2499
2500#include <sys/vfs.h>
2501
2502#define HUGETLBFS_MAGIC 0x958458f6
2503
2504static long gethugepagesize(const char *path)
2505{
2506 struct statfs fs;
2507 int ret;
2508
2509 do {
2510 ret = statfs(path, &fs);
2511 } while (ret != 0 && errno == EINTR);
2512
2513 if (ret != 0) {
2514 perror("statfs");
2515 return 0;
2516 }
2517
2518 if (fs.f_type != HUGETLBFS_MAGIC)
2519 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2520
2521 return fs.f_bsize;
2522}
2523
2524static void *file_ram_alloc(ram_addr_t memory, const char *path)
2525{
2526 char *filename;
2527 void *area;
2528 int fd;
2529#ifdef MAP_POPULATE
2530 int flags;
2531#endif
2532 unsigned long hpagesize;
2533
2534 hpagesize = gethugepagesize(path);
2535 if (!hpagesize) {
2536 return NULL;
2537 }
2538
2539 if (memory < hpagesize) {
2540 return NULL;
2541 }
2542
2543 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2544 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2545 return NULL;
2546 }
2547
2548 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2549 return NULL;
2550 }
2551
2552 fd = mkstemp(filename);
2553 if (fd < 0) {
2554 perror("mkstemp");
2555 free(filename);
2556 return NULL;
2557 }
2558 unlink(filename);
2559 free(filename);
2560
2561 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2562
2563 /*
2564 * ftruncate is not supported by hugetlbfs in older
2565 * hosts, so don't bother bailing out on errors.
2566 * If anything goes wrong with it under other filesystems,
2567 * mmap will fail.
2568 */
2569 if (ftruncate(fd, memory))
2570 perror("ftruncate");
2571
2572#ifdef MAP_POPULATE
2573 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2574 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2575 * to sidestep this quirk.
2576 */
2577 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2578 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2579#else
2580 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2581#endif
2582 if (area == MAP_FAILED) {
2583 perror("file_ram_alloc: can't mmap RAM pages");
2584 close(fd);
2585 return (NULL);
2586 }
2587 return area;
2588}
2589#endif
2590
c227f099 2591ram_addr_t qemu_ram_alloc(ram_addr_t size)
94a6b54f
PB
2592{
2593 RAMBlock *new_block;
2594
94a6b54f
PB
2595 size = TARGET_PAGE_ALIGN(size);
2596 new_block = qemu_malloc(sizeof(*new_block));
2597
c902760f
MT
2598 if (mem_path) {
2599#if defined (__linux__) && !defined(TARGET_S390X)
2600 new_block->host = file_ram_alloc(size, mem_path);
2601 if (!new_block->host)
2602 exit(1);
2603#else
2604 fprintf(stderr, "-mem-path option unsupported\n");
2605 exit(1);
2606#endif
2607 } else {
6b02494d 2608#if defined(TARGET_S390X) && defined(CONFIG_KVM)
c902760f
MT
2609 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2610 new_block->host = mmap((void*)0x1000000, size,
2611 PROT_EXEC|PROT_READ|PROT_WRITE,
2612 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2613#else
c902760f 2614 new_block->host = qemu_vmalloc(size);
6b02494d 2615#endif
ccb167e9 2616#ifdef MADV_MERGEABLE
c902760f 2617 madvise(new_block->host, size, MADV_MERGEABLE);
ccb167e9 2618#endif
c902760f 2619 }
94a6b54f
PB
2620 new_block->offset = last_ram_offset;
2621 new_block->length = size;
2622
2623 new_block->next = ram_blocks;
2624 ram_blocks = new_block;
2625
2626 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2627 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2628 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2629 0xff, size >> TARGET_PAGE_BITS);
2630
2631 last_ram_offset += size;
2632
6f0437e8
JK
2633 if (kvm_enabled())
2634 kvm_setup_guest_memory(new_block->host, size);
2635
94a6b54f
PB
2636 return new_block->offset;
2637}
e9a1ab19 2638
c227f099 2639void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2640{
94a6b54f 2641 /* TODO: implement this. */
e9a1ab19
FB
2642}
2643
dc828ca1 2644/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2645 With the exception of the softmmu code in this file, this should
2646 only be used for local memory (e.g. video ram) that the device owns,
2647 and knows it isn't going to access beyond the end of the block.
2648
2649 It should not be used for general purpose DMA.
2650 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2651 */
c227f099 2652void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2653{
94a6b54f
PB
2654 RAMBlock *prev;
2655 RAMBlock **prevp;
2656 RAMBlock *block;
2657
94a6b54f
PB
2658 prev = NULL;
2659 prevp = &ram_blocks;
2660 block = ram_blocks;
2661 while (block && (block->offset > addr
2662 || block->offset + block->length <= addr)) {
2663 if (prev)
2664 prevp = &prev->next;
2665 prev = block;
2666 block = block->next;
2667 }
2668 if (!block) {
2669 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2670 abort();
2671 }
2672 /* Move this entry to the start of the list. */
2673 if (prev) {
2674 prev->next = block->next;
2675 block->next = *prevp;
2676 *prevp = block;
2677 }
2678 return block->host + (addr - block->offset);
dc828ca1
PB
2679}
2680
5579c7f3
PB
2681/* Some of the softmmu routines need to translate from a host pointer
2682 (typically a TLB entry) back to a ram offset. */
c227f099 2683ram_addr_t qemu_ram_addr_from_host(void *ptr)
5579c7f3 2684{
94a6b54f 2685 RAMBlock *prev;
94a6b54f
PB
2686 RAMBlock *block;
2687 uint8_t *host = ptr;
2688
94a6b54f 2689 prev = NULL;
94a6b54f
PB
2690 block = ram_blocks;
2691 while (block && (block->host > host
2692 || block->host + block->length <= host)) {
94a6b54f
PB
2693 prev = block;
2694 block = block->next;
2695 }
2696 if (!block) {
2697 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2698 abort();
2699 }
2700 return block->offset + (host - block->host);
5579c7f3
PB
2701}
2702
c227f099 2703static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2704{
67d3b957 2705#ifdef DEBUG_UNASSIGNED
ab3d1727 2706 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2707#endif
faed1c2a 2708#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2709 do_unassigned_access(addr, 0, 0, 0, 1);
2710#endif
2711 return 0;
2712}
2713
c227f099 2714static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2715{
2716#ifdef DEBUG_UNASSIGNED
2717 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2718#endif
faed1c2a 2719#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2720 do_unassigned_access(addr, 0, 0, 0, 2);
2721#endif
2722 return 0;
2723}
2724
c227f099 2725static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2726{
2727#ifdef DEBUG_UNASSIGNED
2728 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2729#endif
faed1c2a 2730#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2731 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2732#endif
33417e70
FB
2733 return 0;
2734}
2735
c227f099 2736static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2737{
67d3b957 2738#ifdef DEBUG_UNASSIGNED
ab3d1727 2739 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2740#endif
faed1c2a 2741#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2742 do_unassigned_access(addr, 1, 0, 0, 1);
2743#endif
2744}
2745
c227f099 2746static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2747{
2748#ifdef DEBUG_UNASSIGNED
2749 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2750#endif
faed1c2a 2751#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2752 do_unassigned_access(addr, 1, 0, 0, 2);
2753#endif
2754}
2755
c227f099 2756static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2757{
2758#ifdef DEBUG_UNASSIGNED
2759 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2760#endif
faed1c2a 2761#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2762 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2763#endif
33417e70
FB
2764}
2765
d60efc6b 2766static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 2767 unassigned_mem_readb,
e18231a3
BS
2768 unassigned_mem_readw,
2769 unassigned_mem_readl,
33417e70
FB
2770};
2771
d60efc6b 2772static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 2773 unassigned_mem_writeb,
e18231a3
BS
2774 unassigned_mem_writew,
2775 unassigned_mem_writel,
33417e70
FB
2776};
2777
c227f099 2778static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2779 uint32_t val)
9fa3e853 2780{
3a7d929e 2781 int dirty_flags;
3a7d929e
FB
2782 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2783 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2784#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2785 tb_invalidate_phys_page_fast(ram_addr, 1);
2786 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2787#endif
3a7d929e 2788 }
5579c7f3 2789 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2790 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2791 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2792 /* we remove the notdirty callback only if the code has been
2793 flushed */
2794 if (dirty_flags == 0xff)
2e70f6ef 2795 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2796}
2797
c227f099 2798static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2799 uint32_t val)
9fa3e853 2800{
3a7d929e 2801 int dirty_flags;
3a7d929e
FB
2802 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2803 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2804#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2805 tb_invalidate_phys_page_fast(ram_addr, 2);
2806 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2807#endif
3a7d929e 2808 }
5579c7f3 2809 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2810 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2811 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2812 /* we remove the notdirty callback only if the code has been
2813 flushed */
2814 if (dirty_flags == 0xff)
2e70f6ef 2815 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2816}
2817
c227f099 2818static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2819 uint32_t val)
9fa3e853 2820{
3a7d929e 2821 int dirty_flags;
3a7d929e
FB
2822 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2823 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2824#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2825 tb_invalidate_phys_page_fast(ram_addr, 4);
2826 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2827#endif
3a7d929e 2828 }
5579c7f3 2829 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2830 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2831 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2832 /* we remove the notdirty callback only if the code has been
2833 flushed */
2834 if (dirty_flags == 0xff)
2e70f6ef 2835 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2836}
2837
d60efc6b 2838static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
2839 NULL, /* never used */
2840 NULL, /* never used */
2841 NULL, /* never used */
2842};
2843
d60efc6b 2844static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
2845 notdirty_mem_writeb,
2846 notdirty_mem_writew,
2847 notdirty_mem_writel,
2848};
2849
0f459d16 2850/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2851static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2852{
2853 CPUState *env = cpu_single_env;
06d55cc1
AL
2854 target_ulong pc, cs_base;
2855 TranslationBlock *tb;
0f459d16 2856 target_ulong vaddr;
a1d1bb31 2857 CPUWatchpoint *wp;
06d55cc1 2858 int cpu_flags;
0f459d16 2859
06d55cc1
AL
2860 if (env->watchpoint_hit) {
2861 /* We re-entered the check after replacing the TB. Now raise
2862 * the debug interrupt so that it will trigger after the
2863 * current instruction. */
2864 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2865 return;
2866 }
2e70f6ef 2867 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2868 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2869 if ((vaddr == (wp->vaddr & len_mask) ||
2870 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2871 wp->flags |= BP_WATCHPOINT_HIT;
2872 if (!env->watchpoint_hit) {
2873 env->watchpoint_hit = wp;
2874 tb = tb_find_pc(env->mem_io_pc);
2875 if (!tb) {
2876 cpu_abort(env, "check_watchpoint: could not find TB for "
2877 "pc=%p", (void *)env->mem_io_pc);
2878 }
2879 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2880 tb_phys_invalidate(tb, -1);
2881 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2882 env->exception_index = EXCP_DEBUG;
2883 } else {
2884 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2885 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2886 }
2887 cpu_resume_from_signal(env, NULL);
06d55cc1 2888 }
6e140f28
AL
2889 } else {
2890 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2891 }
2892 }
2893}
2894
6658ffb8
PB
2895/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2896 so these check for a hit then pass through to the normal out-of-line
2897 phys routines. */
c227f099 2898static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 2899{
b4051334 2900 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2901 return ldub_phys(addr);
2902}
2903
c227f099 2904static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 2905{
b4051334 2906 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2907 return lduw_phys(addr);
2908}
2909
c227f099 2910static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 2911{
b4051334 2912 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2913 return ldl_phys(addr);
2914}
2915
c227f099 2916static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2917 uint32_t val)
2918{
b4051334 2919 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2920 stb_phys(addr, val);
2921}
2922
c227f099 2923static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2924 uint32_t val)
2925{
b4051334 2926 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2927 stw_phys(addr, val);
2928}
2929
c227f099 2930static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2931 uint32_t val)
2932{
b4051334 2933 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2934 stl_phys(addr, val);
2935}
2936
d60efc6b 2937static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
2938 watch_mem_readb,
2939 watch_mem_readw,
2940 watch_mem_readl,
2941};
2942
d60efc6b 2943static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
2944 watch_mem_writeb,
2945 watch_mem_writew,
2946 watch_mem_writel,
2947};
6658ffb8 2948
c227f099 2949static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
2950 unsigned int len)
2951{
db7b5426
BS
2952 uint32_t ret;
2953 unsigned int idx;
2954
8da3ff18 2955 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2956#if defined(DEBUG_SUBPAGE)
2957 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2958 mmio, len, addr, idx);
2959#endif
8da3ff18
PB
2960 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2961 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2962
2963 return ret;
2964}
2965
c227f099 2966static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
2967 uint32_t value, unsigned int len)
2968{
db7b5426
BS
2969 unsigned int idx;
2970
8da3ff18 2971 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2972#if defined(DEBUG_SUBPAGE)
2973 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2974 mmio, len, addr, idx, value);
2975#endif
8da3ff18
PB
2976 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2977 addr + mmio->region_offset[idx][1][len],
2978 value);
db7b5426
BS
2979}
2980
c227f099 2981static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426
BS
2982{
2983#if defined(DEBUG_SUBPAGE)
2984 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2985#endif
2986
2987 return subpage_readlen(opaque, addr, 0);
2988}
2989
c227f099 2990static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
2991 uint32_t value)
2992{
2993#if defined(DEBUG_SUBPAGE)
2994 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2995#endif
2996 subpage_writelen(opaque, addr, value, 0);
2997}
2998
c227f099 2999static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3000{
3001#if defined(DEBUG_SUBPAGE)
3002 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3003#endif
3004
3005 return subpage_readlen(opaque, addr, 1);
3006}
3007
c227f099 3008static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3009 uint32_t value)
3010{
3011#if defined(DEBUG_SUBPAGE)
3012 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3013#endif
3014 subpage_writelen(opaque, addr, value, 1);
3015}
3016
c227f099 3017static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3018{
3019#if defined(DEBUG_SUBPAGE)
3020 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3021#endif
3022
3023 return subpage_readlen(opaque, addr, 2);
3024}
3025
3026static void subpage_writel (void *opaque,
c227f099 3027 target_phys_addr_t addr, uint32_t value)
db7b5426
BS
3028{
3029#if defined(DEBUG_SUBPAGE)
3030 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3031#endif
3032 subpage_writelen(opaque, addr, value, 2);
3033}
3034
d60efc6b 3035static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3036 &subpage_readb,
3037 &subpage_readw,
3038 &subpage_readl,
3039};
3040
d60efc6b 3041static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3042 &subpage_writeb,
3043 &subpage_writew,
3044 &subpage_writel,
3045};
3046
c227f099
AL
3047static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3048 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3049{
3050 int idx, eidx;
4254fab8 3051 unsigned int i;
db7b5426
BS
3052
3053 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3054 return -1;
3055 idx = SUBPAGE_IDX(start);
3056 eidx = SUBPAGE_IDX(end);
3057#if defined(DEBUG_SUBPAGE)
0bf9e31a 3058 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3059 mmio, start, end, idx, eidx, memory);
3060#endif
3061 memory >>= IO_MEM_SHIFT;
3062 for (; idx <= eidx; idx++) {
4254fab8 3063 for (i = 0; i < 4; i++) {
3ee89922
BS
3064 if (io_mem_read[memory][i]) {
3065 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3066 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 3067 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
3068 }
3069 if (io_mem_write[memory][i]) {
3070 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3071 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 3072 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 3073 }
4254fab8 3074 }
db7b5426
BS
3075 }
3076
3077 return 0;
3078}
3079
c227f099
AL
3080static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3081 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426 3082{
c227f099 3083 subpage_t *mmio;
db7b5426
BS
3084 int subpage_memory;
3085
c227f099 3086 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3087
3088 mmio->base = base;
1eed09cb 3089 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3090#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3091 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3092 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3093#endif
1eec614b
AL
3094 *phys = subpage_memory | IO_MEM_SUBPAGE;
3095 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 3096 region_offset);
db7b5426
BS
3097
3098 return mmio;
3099}
3100
88715657
AL
3101static int get_free_io_mem_idx(void)
3102{
3103 int i;
3104
3105 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3106 if (!io_mem_used[i]) {
3107 io_mem_used[i] = 1;
3108 return i;
3109 }
c6703b47 3110 fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3111 return -1;
3112}
3113
33417e70
FB
3114/* mem_read and mem_write are arrays of functions containing the
3115 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3116 2). Functions can be omitted with a NULL function pointer.
3ee89922 3117 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3118 modified. If it is zero, a new io zone is allocated. The return
3119 value can be used with cpu_register_physical_memory(). (-1) is
3120 returned on error. */
1eed09cb 3121static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3122 CPUReadMemoryFunc * const *mem_read,
3123 CPUWriteMemoryFunc * const *mem_write,
1eed09cb 3124 void *opaque)
33417e70 3125{
4254fab8 3126 int i, subwidth = 0;
33417e70
FB
3127
3128 if (io_index <= 0) {
88715657
AL
3129 io_index = get_free_io_mem_idx();
3130 if (io_index == -1)
3131 return io_index;
33417e70 3132 } else {
1eed09cb 3133 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3134 if (io_index >= IO_MEM_NB_ENTRIES)
3135 return -1;
3136 }
b5ff1b31 3137
33417e70 3138 for(i = 0;i < 3; i++) {
4254fab8
BS
3139 if (!mem_read[i] || !mem_write[i])
3140 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
3141 io_mem_read[io_index][i] = mem_read[i];
3142 io_mem_write[io_index][i] = mem_write[i];
3143 }
a4193c8a 3144 io_mem_opaque[io_index] = opaque;
4254fab8 3145 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 3146}
61382a50 3147
d60efc6b
BS
3148int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3149 CPUWriteMemoryFunc * const *mem_write,
1eed09cb
AK
3150 void *opaque)
3151{
3152 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3153}
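
/* Illustrative sketch (not part of exec.c): registering a trivial
   one-register MMIO device with cpu_register_io_memory().  The device state,
   register layout and callback names are hypothetical; NULL slots in the
   callback tables mark access widths the device does not implement directly
   (the core then flags the region as sub-width, as above). */
typedef struct ExampleMMIOState {
    uint32_t reg;
} ExampleMMIOState;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleMMIOState *s = opaque;
    return s->reg;
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleMMIOState *s = opaque;
    s->reg = val;
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL,                       /* no byte access */
    NULL,                       /* no word access */
    example_mmio_readl,         /* 32 bit access */
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL,
    NULL,
    example_mmio_writel,
};

static void example_mmio_init(ExampleMMIOState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write, s);

    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}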
3154
88715657
AL
3155void cpu_unregister_io_memory(int io_table_address)
3156{
3157 int i;
3158 int io_index = io_table_address >> IO_MEM_SHIFT;
3159
3160 for (i=0;i < 3; i++) {
3161 io_mem_read[io_index][i] = unassigned_mem_read[i];
3162 io_mem_write[io_index][i] = unassigned_mem_write[i];
3163 }
3164 io_mem_opaque[io_index] = NULL;
3165 io_mem_used[io_index] = 0;
3166}
3167
e9179ce1
AK
3168static void io_mem_init(void)
3169{
3170 int i;
3171
3172 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3173 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3174 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3175 for (i=0; i<5; i++)
3176 io_mem_used[i] = 1;
3177
3178 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3179 watch_mem_write, NULL);
e9179ce1
AK
3180}
3181
e2eef170
PB
3182#endif /* !defined(CONFIG_USER_ONLY) */
3183
13eb76e0
FB
3184/* physical memory access (slow version, mainly for debug) */
3185#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3186int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3187 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3188{
3189 int l, flags;
3190 target_ulong page;
53a5960a 3191 void * p;
13eb76e0
FB
3192
3193 while (len > 0) {
3194 page = addr & TARGET_PAGE_MASK;
3195 l = (page + TARGET_PAGE_SIZE) - addr;
3196 if (l > len)
3197 l = len;
3198 flags = page_get_flags(page);
3199 if (!(flags & PAGE_VALID))
a68fe89c 3200 return -1;
13eb76e0
FB
3201 if (is_write) {
3202 if (!(flags & PAGE_WRITE))
a68fe89c 3203 return -1;
579a97f7 3204 /* XXX: this code should not depend on lock_user */
72fb7daa 3205 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3206 return -1;
72fb7daa
AJ
3207 memcpy(p, buf, l);
3208 unlock_user(p, addr, l);
13eb76e0
FB
3209 } else {
3210 if (!(flags & PAGE_READ))
a68fe89c 3211 return -1;
579a97f7 3212 /* XXX: this code should not depend on lock_user */
72fb7daa 3213 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3214 return -1;
72fb7daa 3215 memcpy(buf, p, l);
5b257578 3216 unlock_user(p, addr, 0);
13eb76e0
FB
3217 }
3218 len -= l;
3219 buf += l;
3220 addr += l;
3221 }
a68fe89c 3222 return 0;
13eb76e0 3223}
8df1cd07 3224
13eb76e0 3225#else
c227f099 3226void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3227 int len, int is_write)
3228{
3229 int l, io_index;
3230 uint8_t *ptr;
3231 uint32_t val;
c227f099 3232 target_phys_addr_t page;
2e12669a 3233 unsigned long pd;
92e873b9 3234 PhysPageDesc *p;
3b46e624 3235
13eb76e0
FB
3236 while (len > 0) {
3237 page = addr & TARGET_PAGE_MASK;
3238 l = (page + TARGET_PAGE_SIZE) - addr;
3239 if (l > len)
3240 l = len;
92e873b9 3241 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3242 if (!p) {
3243 pd = IO_MEM_UNASSIGNED;
3244 } else {
3245 pd = p->phys_offset;
3246 }
3b46e624 3247
13eb76e0 3248 if (is_write) {
3a7d929e 3249 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3250 target_phys_addr_t addr1 = addr;
13eb76e0 3251 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3252 if (p)
6c2934db 3253 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3254 /* XXX: could force cpu_single_env to NULL to avoid
3255 potential bugs */
6c2934db 3256 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3257 /* 32 bit write access */
c27004ec 3258 val = ldl_p(buf);
6c2934db 3259 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3260 l = 4;
6c2934db 3261 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3262 /* 16 bit write access */
c27004ec 3263 val = lduw_p(buf);
6c2934db 3264 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3265 l = 2;
3266 } else {
1c213d19 3267 /* 8 bit write access */
c27004ec 3268 val = ldub_p(buf);
6c2934db 3269 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3270 l = 1;
3271 }
3272 } else {
b448f2f3
FB
3273 unsigned long addr1;
3274 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3275 /* RAM case */
5579c7f3 3276 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3277 memcpy(ptr, buf, l);
3a7d929e
FB
3278 if (!cpu_physical_memory_is_dirty(addr1)) {
3279 /* invalidate code */
3280 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3281 /* set dirty bit */
5fafdf24 3282 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3283 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3284 }
13eb76e0
FB
3285 }
3286 } else {
5fafdf24 3287 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3288 !(pd & IO_MEM_ROMD)) {
c227f099 3289 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3290 /* I/O case */
3291 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3292 if (p)
6c2934db
AJ
3293 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3294 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3295 /* 32 bit read access */
6c2934db 3296 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3297 stl_p(buf, val);
13eb76e0 3298 l = 4;
6c2934db 3299 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3300 /* 16 bit read access */
6c2934db 3301 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3302 stw_p(buf, val);
13eb76e0
FB
3303 l = 2;
3304 } else {
1c213d19 3305 /* 8 bit read access */
6c2934db 3306 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3307 stb_p(buf, val);
13eb76e0
FB
3308 l = 1;
3309 }
3310 } else {
3311 /* RAM case */
5579c7f3 3312 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3313 (addr & ~TARGET_PAGE_MASK);
3314 memcpy(buf, ptr, l);
3315 }
3316 }
3317 len -= l;
3318 buf += l;
3319 addr += l;
3320 }
3321}
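
/* Illustrative sketch (not part of exec.c): a device copying guest-physical
   memory through the slow path above, via the cpu_physical_memory_read() and
   cpu_physical_memory_write() wrappers from cpu-common.h.  The bounce buffer
   size chosen here is arbitrary. */
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[256];

    while (len > 0) {
        int l = len > (int)sizeof(buf) ? (int)sizeof(buf) : len;

        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}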
8df1cd07 3322
d0ecd2aa 3323/* used for ROM loading : can write in RAM and ROM */
c227f099 3324void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3325 const uint8_t *buf, int len)
3326{
3327 int l;
3328 uint8_t *ptr;
c227f099 3329 target_phys_addr_t page;
d0ecd2aa
FB
3330 unsigned long pd;
3331 PhysPageDesc *p;
3b46e624 3332
d0ecd2aa
FB
3333 while (len > 0) {
3334 page = addr & TARGET_PAGE_MASK;
3335 l = (page + TARGET_PAGE_SIZE) - addr;
3336 if (l > len)
3337 l = len;
3338 p = phys_page_find(page >> TARGET_PAGE_BITS);
3339 if (!p) {
3340 pd = IO_MEM_UNASSIGNED;
3341 } else {
3342 pd = p->phys_offset;
3343 }
3b46e624 3344
d0ecd2aa 3345 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3346 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3347 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3348 /* do nothing */
3349 } else {
3350 unsigned long addr1;
3351 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3352 /* ROM/RAM case */
5579c7f3 3353 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3354 memcpy(ptr, buf, l);
3355 }
3356 len -= l;
3357 buf += l;
3358 addr += l;
3359 }
3360}
3361
6d16c2f8
AL
3362typedef struct {
3363 void *buffer;
c227f099
AL
3364 target_phys_addr_t addr;
3365 target_phys_addr_t len;
6d16c2f8
AL
3366} BounceBuffer;
3367
3368static BounceBuffer bounce;
3369
ba223c29
AL
3370typedef struct MapClient {
3371 void *opaque;
3372 void (*callback)(void *opaque);
72cf2d4f 3373 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3374} MapClient;
3375
72cf2d4f
BS
3376static QLIST_HEAD(map_client_list, MapClient) map_client_list
3377 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3378
3379void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3380{
3381 MapClient *client = qemu_malloc(sizeof(*client));
3382
3383 client->opaque = opaque;
3384 client->callback = callback;
72cf2d4f 3385 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3386 return client;
3387}
3388
3389void cpu_unregister_map_client(void *_client)
3390{
3391 MapClient *client = (MapClient *)_client;
3392
72cf2d4f 3393 QLIST_REMOVE(client, link);
34d5e948 3394 qemu_free(client);
ba223c29
AL
3395}
3396
3397static void cpu_notify_map_clients(void)
3398{
3399 MapClient *client;
3400
72cf2d4f
BS
3401 while (!QLIST_EMPTY(&map_client_list)) {
3402 client = QLIST_FIRST(&map_client_list);
ba223c29 3403 client->callback(client->opaque);
34d5e948 3404 cpu_unregister_map_client(client);
ba223c29
AL
3405 }
3406}
3407
6d16c2f8
AL
3408/* Map a physical memory region into a host virtual address.
3409 * May map a subset of the requested range, given by and returned in *plen.
3410 * May return NULL if resources needed to perform the mapping are exhausted.
3411 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3412 * Use cpu_register_map_client() to know when retrying the map operation is
3413 * likely to succeed.
6d16c2f8 3414 */
c227f099
AL
3415void *cpu_physical_memory_map(target_phys_addr_t addr,
3416 target_phys_addr_t *plen,
6d16c2f8
AL
3417 int is_write)
3418{
c227f099
AL
3419 target_phys_addr_t len = *plen;
3420 target_phys_addr_t done = 0;
6d16c2f8
AL
3421 int l;
3422 uint8_t *ret = NULL;
3423 uint8_t *ptr;
c227f099 3424 target_phys_addr_t page;
6d16c2f8
AL
3425 unsigned long pd;
3426 PhysPageDesc *p;
3427 unsigned long addr1;
3428
3429 while (len > 0) {
3430 page = addr & TARGET_PAGE_MASK;
3431 l = (page + TARGET_PAGE_SIZE) - addr;
3432 if (l > len)
3433 l = len;
3434 p = phys_page_find(page >> TARGET_PAGE_BITS);
3435 if (!p) {
3436 pd = IO_MEM_UNASSIGNED;
3437 } else {
3438 pd = p->phys_offset;
3439 }
3440
3441 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3442 if (done || bounce.buffer) {
3443 break;
3444 }
3445 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3446 bounce.addr = addr;
3447 bounce.len = l;
3448 if (!is_write) {
3449 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3450 }
3451 ptr = bounce.buffer;
3452 } else {
3453 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3454 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8
AL
3455 }
3456 if (!done) {
3457 ret = ptr;
3458 } else if (ret + done != ptr) {
3459 break;
3460 }
3461
3462 len -= l;
3463 addr += l;
3464 done += l;
3465 }
3466 *plen = done;
3467 return ret;
3468}
3469
3470/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3471 * Will also mark the memory as dirty if is_write == 1. access_len gives
3472 * the amount of memory that was actually read or written by the caller.
3473 */
c227f099
AL
3474void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3475 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3476{
3477 if (buffer != bounce.buffer) {
3478 if (is_write) {
c227f099 3479 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
6d16c2f8
AL
3480 while (access_len) {
3481 unsigned l;
3482 l = TARGET_PAGE_SIZE;
3483 if (l > access_len)
3484 l = access_len;
3485 if (!cpu_physical_memory_is_dirty(addr1)) {
3486 /* invalidate code */
3487 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3488 /* set dirty bit */
3489 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3490 (0xff & ~CODE_DIRTY_FLAG);
3491 }
3492 addr1 += l;
3493 access_len -= l;
3494 }
3495 }
3496 return;
3497 }
3498 if (is_write) {
3499 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3500 }
f8a83245 3501 qemu_vfree(bounce.buffer);
6d16c2f8 3502 bounce.buffer = NULL;
ba223c29 3503 cpu_notify_map_clients();
6d16c2f8 3504}
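
/*
 * Hedged usage sketch of the map/unmap pair above (illustrative only;
 * example_dma_to_host() is not a QEMU API).  It copies guest physical
 * memory to a host buffer, handling the case where the mapping covers
 * less than the requested length (*plen is adjusted by the call) and
 * passing the number of bytes actually touched back to unmap.
 */
static void example_dma_to_host(target_phys_addr_t guest_addr,
                                uint8_t *dest, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(guest_addr, &plen, 0 /* read */);

        if (!host || plen == 0) {
            /* Resources exhausted (e.g. bounce buffer busy); a real caller
               would register a map client and retry later. */
            break;
        }
        memcpy(dest, host, plen);
        /* is_write == 0, so no pages are dirtied; access_len still reports
           how many bytes were consumed. */
        cpu_physical_memory_unmap(host, plen, 0, plen);

        guest_addr += plen;
        dest += plen;
        size -= plen;
    }
}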
d0ecd2aa 3505
8df1cd07 3506/* warning: addr must be aligned */
c227f099 3507uint32_t ldl_phys(target_phys_addr_t addr)
8df1cd07
FB
3508{
3509 int io_index;
3510 uint8_t *ptr;
3511 uint32_t val;
3512 unsigned long pd;
3513 PhysPageDesc *p;
3514
3515 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3516 if (!p) {
3517 pd = IO_MEM_UNASSIGNED;
3518 } else {
3519 pd = p->phys_offset;
3520 }
3b46e624 3521
5fafdf24 3522 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3523 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3524 /* I/O case */
3525 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3526 if (p)
3527 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3528 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3529 } else {
3530 /* RAM case */
5579c7f3 3531 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3532 (addr & ~TARGET_PAGE_MASK);
3533 val = ldl_p(ptr);
3534 }
3535 return val;
3536}
3537
84b7b8e7 3538/* warning: addr must be aligned */
c227f099 3539uint64_t ldq_phys(target_phys_addr_t addr)
84b7b8e7
FB
3540{
3541 int io_index;
3542 uint8_t *ptr;
3543 uint64_t val;
3544 unsigned long pd;
3545 PhysPageDesc *p;
3546
3547 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3548 if (!p) {
3549 pd = IO_MEM_UNASSIGNED;
3550 } else {
3551 pd = p->phys_offset;
3552 }
3b46e624 3553
2a4188a3
FB
3554 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3555 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3556 /* I/O case */
3557 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3558 if (p)
3559 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3560#ifdef TARGET_WORDS_BIGENDIAN
3561 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3562 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3563#else
3564 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3565 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3566#endif
3567 } else {
3568 /* RAM case */
5579c7f3 3569 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3570 (addr & ~TARGET_PAGE_MASK);
3571 val = ldq_p(ptr);
3572 }
3573 return val;
3574}
3575
aab33094 3576/* XXX: optimize */
c227f099 3577uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3578{
3579 uint8_t val;
3580 cpu_physical_memory_read(addr, &val, 1);
3581 return val;
3582}
3583
3584/* XXX: optimize */
c227f099 3585uint32_t lduw_phys(target_phys_addr_t addr)
aab33094
FB
3586{
3587 uint16_t val;
3588 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3589 return tswap16(val);
3590}
3591
8df1cd07
FB
3592/* warning: addr must be aligned. The ram page is not marked as dirty
3593 and the code inside is not invalidated. It is useful if the dirty
3594 bits are used to track modified PTEs */
c227f099 3595void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
3596{
3597 int io_index;
3598 uint8_t *ptr;
3599 unsigned long pd;
3600 PhysPageDesc *p;
3601
3602 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3603 if (!p) {
3604 pd = IO_MEM_UNASSIGNED;
3605 } else {
3606 pd = p->phys_offset;
3607 }
3b46e624 3608
3a7d929e 3609 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3610 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3611 if (p)
3612 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3613 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3614 } else {
74576198 3615 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3616 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3617 stl_p(ptr, val);
74576198
AL
3618
3619 if (unlikely(in_migration)) {
3620 if (!cpu_physical_memory_is_dirty(addr1)) {
3621 /* invalidate code */
3622 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3623 /* set dirty bit */
3624 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3625 (0xff & ~CODE_DIRTY_FLAG);
3626 }
3627 }
8df1cd07
FB
3628 }
3629}
3630
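
/*
 * Illustrative sketch (an assumption, not code from this file) of the
 * "track modified PTEs" use case mentioned above: a target's software
 * page-table walker sets the accessed bit in a guest PTE with
 * stl_phys_notdirty(), so the RAM page is not dirtied and no translated
 * code is invalidated.  PTE_ACCESSED is a made-up placeholder constant.
 */
#define PTE_ACCESSED 0x20   /* hypothetical accessed bit */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}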
c227f099 3631void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
3632{
3633 int io_index;
3634 uint8_t *ptr;
3635 unsigned long pd;
3636 PhysPageDesc *p;
3637
3638 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3639 if (!p) {
3640 pd = IO_MEM_UNASSIGNED;
3641 } else {
3642 pd = p->phys_offset;
3643 }
3b46e624 3644
bc98a7ef
JM
3645 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3646 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3647 if (p)
3648 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3649#ifdef TARGET_WORDS_BIGENDIAN
3650 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3651 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3652#else
3653 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3654 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3655#endif
3656 } else {
5579c7f3 3657 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3658 (addr & ~TARGET_PAGE_MASK);
3659 stq_p(ptr, val);
3660 }
3661}
3662
8df1cd07 3663/* warning: addr must be aligned */
c227f099 3664void stl_phys(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
3665{
3666 int io_index;
3667 uint8_t *ptr;
3668 unsigned long pd;
3669 PhysPageDesc *p;
3670
3671 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3672 if (!p) {
3673 pd = IO_MEM_UNASSIGNED;
3674 } else {
3675 pd = p->phys_offset;
3676 }
3b46e624 3677
3a7d929e 3678 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3679 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3680 if (p)
3681 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3682 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3683 } else {
3684 unsigned long addr1;
3685 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3686 /* RAM case */
5579c7f3 3687 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3688 stl_p(ptr, val);
3a7d929e
FB
3689 if (!cpu_physical_memory_is_dirty(addr1)) {
3690 /* invalidate code */
3691 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3692 /* set dirty bit */
f23db169
FB
3693 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3694 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3695 }
8df1cd07
FB
3696 }
3697}
3698
aab33094 3699/* XXX: optimize */
c227f099 3700void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
3701{
3702 uint8_t v = val;
3703 cpu_physical_memory_write(addr, &v, 1);
3704}
3705
3706/* XXX: optimize */
c227f099 3707void stw_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
3708{
3709 uint16_t v = tswap16(val);
3710 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3711}
3712
3713/* XXX: optimize */
c227f099 3714void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
3715{
3716 val = tswap64(val);
3717 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3718}
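
/*
 * Small sketch of the physical load/store helpers above (illustrative;
 * example_poke_mailbox() is not a QEMU API).  Addresses passed to the
 * 32- and 64-bit helpers must be naturally aligned, as the warnings above
 * state; the byte and word helpers go through cpu_physical_memory_rw()
 * and carry no such requirement.
 */
static void example_poke_mailbox(target_phys_addr_t base)
{
    uint32_t status = ldl_phys(base);   /* aligned 32-bit read */

    stl_phys(base + 4, status | 1);     /* aligned 32-bit write */
    stb_phys(base + 8, 0xff);           /* byte access, no alignment rule */
}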
3719
5e2972fd 3720/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 3721int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3722 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3723{
3724 int l;
c227f099 3725 target_phys_addr_t phys_addr;
9b3c35e0 3726 target_ulong page;
13eb76e0
FB
3727
3728 while (len > 0) {
3729 page = addr & TARGET_PAGE_MASK;
3730 phys_addr = cpu_get_phys_page_debug(env, page);
3731 /* if no physical page mapped, return an error */
3732 if (phys_addr == -1)
3733 return -1;
3734 l = (page + TARGET_PAGE_SIZE) - addr;
3735 if (l > len)
3736 l = len;
5e2972fd 3737 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
3738 if (is_write)
3739 cpu_physical_memory_write_rom(phys_addr, buf, l);
3740 else
5e2972fd 3741 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
3742 len -= l;
3743 buf += l;
3744 addr += l;
3745 }
3746 return 0;
3747}
a68fe89c 3748#endif
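
/*
 * Hedged example (not from the original file): reading guest virtual memory
 * the way a debugger stub would, via cpu_memory_rw_debug() above.
 * example_debug_peek32() is a hypothetical helper; error handling is minimal.
 */
static uint32_t example_debug_peek32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4] = { 0 };

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;   /* no physical page mapped at vaddr */
    }
    /* decode the raw guest bytes as a target-endian 32-bit value */
    return ldl_p(buf);
}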
13eb76e0 3749
2e70f6ef
PB
3750/* in deterministic execution mode, instructions doing device I/Os
3751 must be at the end of the TB */
3752void cpu_io_recompile(CPUState *env, void *retaddr)
3753{
3754 TranslationBlock *tb;
3755 uint32_t n, cflags;
3756 target_ulong pc, cs_base;
3757 uint64_t flags;
3758
3759 tb = tb_find_pc((unsigned long)retaddr);
3760 if (!tb) {
3761 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3762 retaddr);
3763 }
3764 n = env->icount_decr.u16.low + tb->icount;
3765 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3766 /* Calculate how many instructions had been executed before the fault
bf20dc07 3767 occurred. */
2e70f6ef
PB
3768 n = n - env->icount_decr.u16.low;
3769 /* Generate a new TB ending on the I/O insn. */
3770 n++;
3771 /* On MIPS and SH, delay slot instructions can only be restarted if
3772 they were already the first instruction in the TB. If this is not
bf20dc07 3773 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3774 branch. */
3775#if defined(TARGET_MIPS)
3776 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3777 env->active_tc.PC -= 4;
3778 env->icount_decr.u16.low++;
3779 env->hflags &= ~MIPS_HFLAG_BMASK;
3780 }
3781#elif defined(TARGET_SH4)
3782 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3783 && n > 1) {
3784 env->pc -= 2;
3785 env->icount_decr.u16.low++;
3786 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3787 }
3788#endif
3789 /* This should never happen. */
3790 if (n > CF_COUNT_MASK)
3791 cpu_abort(env, "TB too big during recompile");
3792
3793 cflags = n | CF_LAST_IO;
3794 pc = tb->pc;
3795 cs_base = tb->cs_base;
3796 flags = tb->flags;
3797 tb_phys_invalidate(tb, -1);
3798 /* FIXME: In theory this could raise an exception. In practice
3799 we have already translated the block once so it's probably ok. */
3800 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3801 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3802 the first in the TB) then we end up generating a whole new TB and
3803 repeating the fault, which is horribly inefficient.
3804 Better would be to execute just this insn uncached, or generate a
3805 second new TB. */
3806 cpu_resume_from_signal(env, NULL);
3807}
3808
e3db7226
FB
3809void dump_exec_info(FILE *f,
3810 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3811{
3812 int i, target_code_size, max_target_code_size;
3813 int direct_jmp_count, direct_jmp2_count, cross_page;
3814 TranslationBlock *tb;
3b46e624 3815
e3db7226
FB
3816 target_code_size = 0;
3817 max_target_code_size = 0;
3818 cross_page = 0;
3819 direct_jmp_count = 0;
3820 direct_jmp2_count = 0;
3821 for(i = 0; i < nb_tbs; i++) {
3822 tb = &tbs[i];
3823 target_code_size += tb->size;
3824 if (tb->size > max_target_code_size)
3825 max_target_code_size = tb->size;
3826 if (tb->page_addr[1] != -1)
3827 cross_page++;
3828 if (tb->tb_next_offset[0] != 0xffff) {
3829 direct_jmp_count++;
3830 if (tb->tb_next_offset[1] != 0xffff) {
3831 direct_jmp2_count++;
3832 }
3833 }
3834 }
3835 /* XXX: avoid using doubles ? */
57fec1fe 3836 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3837 cpu_fprintf(f, "gen code size %ld/%ld\n",
3838 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3839 cpu_fprintf(f, "TB count %d/%d\n",
3840 nb_tbs, code_gen_max_blocks);
5fafdf24 3841 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3842 nb_tbs ? target_code_size / nb_tbs : 0,
3843 max_target_code_size);
5fafdf24 3844 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3845 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3846 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3847 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3848 cross_page,
e3db7226
FB
3849 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3850 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3851 direct_jmp_count,
e3db7226
FB
3852 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3853 direct_jmp2_count,
3854 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3855 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3856 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3857 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3858 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3859 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3860}
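
/*
 * Minimal sketch (assumption, not original code): dumping the translation
 * statistics above to stderr.  fprintf() matches the cpu_fprintf callback
 * signature, so it can be passed directly; in QEMU this information is
 * normally requested through the monitor.
 */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}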
3861
5fafdf24 3862#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3863
3864#define MMUSUFFIX _cmmu
3865#define GETPC() NULL
3866#define env cpu_single_env
b769d8fe 3867#define SOFTMMU_CODE_ACCESS
61382a50
FB
3868
3869#define SHIFT 0
3870#include "softmmu_template.h"
3871
3872#define SHIFT 1
3873#include "softmmu_template.h"
3874
3875#define SHIFT 2
3876#include "softmmu_template.h"
3877
3878#define SHIFT 3
3879#include "softmmu_template.h"
3880
3881#undef env
3882
3883#endif