/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
eb51d102
FB
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
141ac468
BS
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
9fa3e853
FB
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
6a00d601
FB
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
2e70f6ef
PB
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
8da3ff18 149 ram_addr_t region_offset;
92e873b9
FB
150} PhysPageDesc;
151
54936004 152#define L2_BITS 10
bedb69ea
JM
153#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154/* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
157 */
158#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159#else
03875444 160#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 161#endif
54936004
FB
162
163#define L1_SIZE (1 << L1_BITS)
164#define L2_SIZE (1 << L2_BITS)
165
83fb7adf
FB
166unsigned long qemu_real_host_page_size;
167unsigned long qemu_host_page_bits;
168unsigned long qemu_host_page_size;
169unsigned long qemu_host_page_mask;
54936004 170
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
285
434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
17e2377a
PB
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
292 return NULL;
293#endif
434929bf
AL
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
54936004
FB
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a
PB
307#if defined(CONFIG_USER_ONLY)
308 unsigned long addr;
309 size_t len = sizeof(PageDesc) * L2_SIZE;
310 /* Don't use qemu_malloc because it may recurse. */
311 p = mmap(0, len, PROT_READ | PROT_WRITE,
312 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 313 *lp = p;
17e2377a
PB
314 addr = h2g(p);
315 if (addr == (target_ulong)addr) {
316 page_set_flags(addr & TARGET_PAGE_MASK,
317 TARGET_PAGE_ALIGN(addr + len),
318 PAGE_RESERVED);
319 }
320#else
321 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
322 *lp = p;
323#endif
54936004
FB
324 }
325 return p + (index & (L2_SIZE - 1));
326}
327
00f82b8a 328static inline PageDesc *page_find(target_ulong index)
54936004 329{
434929bf
AL
330 PageDesc **lp, *p;
331 lp = page_l1_map(index);
332 if (!lp)
333 return NULL;
54936004 334
434929bf 335 p = *lp;
54936004
FB
336 if (!p)
337 return 0;
fd6ce8f6
FB
338 return p + (index & (L2_SIZE - 1));
339}
340
108c49b8 341static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 342{
108c49b8 343 void **lp, **p;
e3f4e2a4 344 PhysPageDesc *pd;
92e873b9 345
108c49b8
FB
346 p = (void **)l1_phys_map;
347#if TARGET_PHYS_ADDR_SPACE_BITS > 32
348
349#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351#endif
352 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
353 p = *lp;
354 if (!p) {
355 /* allocate if not found */
108c49b8
FB
356 if (!alloc)
357 return NULL;
358 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
359 memset(p, 0, sizeof(void *) * L1_SIZE);
360 *lp = p;
361 }
362#endif
363 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
364 pd = *lp;
365 if (!pd) {
366 int i;
108c49b8
FB
367 /* allocate if not found */
368 if (!alloc)
369 return NULL;
e3f4e2a4
PB
370 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
371 *lp = pd;
372 for (i = 0; i < L2_SIZE; i++)
373 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 374 }
e3f4e2a4 375 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
376}
377
108c49b8 378static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 379{
108c49b8 380 return phys_page_find_alloc(index, 0);
92e873b9
FB
381}
382
9fa3e853 383#if !defined(CONFIG_USER_ONLY)
6a00d601 384static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 385static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 386 target_ulong vaddr);
c8a706fe
PB
387#define mmap_lock() do { } while(0)
388#define mmap_unlock() do { } while(0)
9fa3e853 389#endif
fd6ce8f6 390
4369415f
FB
391#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
392
393#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used. */
396#define USE_STATIC_CODE_GEN_BUFFER
397#endif
398
399#ifdef USE_STATIC_CODE_GEN_BUFFER
400static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
401#endif
402
8fcd3692 403static void code_gen_alloc(unsigned long tb_size)
26a5f13b 404{
4369415f
FB
405#ifdef USE_STATIC_CODE_GEN_BUFFER
406 code_gen_buffer = static_code_gen_buffer;
407 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
408 map_exec(code_gen_buffer, code_gen_buffer_size);
409#else
26a5f13b
FB
410 code_gen_buffer_size = tb_size;
411 if (code_gen_buffer_size == 0) {
4369415f
FB
412#if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415#else
        /* XXX: needs adjustments */
174a9a1f 417 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 418#endif
26a5f13b
FB
419 }
420 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
421 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424#if defined(__linux__)
425 {
426 int flags;
141ac468
BS
427 void *start = NULL;
428
26a5f13b
FB
429 flags = MAP_PRIVATE | MAP_ANONYMOUS;
430#if defined(__x86_64__)
431 flags |= MAP_32BIT;
432 /* Cannot map more than that */
433 if (code_gen_buffer_size > (800 * 1024 * 1024))
434 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
435#elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
437 flags |= MAP_FIXED;
438 start = (void *) 0x60000000UL;
439 if (code_gen_buffer_size > (512 * 1024 * 1024))
440 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 441#elif defined(__arm__)
63d41246 442 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
443 flags |= MAP_FIXED;
444 start = (void *) 0x01000000UL;
445 if (code_gen_buffer_size > 16 * 1024 * 1024)
446 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 447#endif
141ac468
BS
448 code_gen_buffer = mmap(start, code_gen_buffer_size,
449 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
450 flags, -1, 0);
451 if (code_gen_buffer == MAP_FAILED) {
452 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
453 exit(1);
454 }
455 }
06e67a82
AL
456#elif defined(__FreeBSD__)
457 {
458 int flags;
459 void *addr = NULL;
460 flags = MAP_PRIVATE | MAP_ANONYMOUS;
461#if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
464 flags |= MAP_FIXED;
465 addr = (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size > (800 * 1024 * 1024))
468 code_gen_buffer_size = (800 * 1024 * 1024);
469#endif
470 code_gen_buffer = mmap(addr, code_gen_buffer_size,
471 PROT_WRITE | PROT_READ | PROT_EXEC,
472 flags, -1, 0);
473 if (code_gen_buffer == MAP_FAILED) {
474 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
475 exit(1);
476 }
477 }
26a5f13b
FB
478#else
479 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
480 if (!code_gen_buffer) {
481 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 exit(1);
483 }
484 map_exec(code_gen_buffer, code_gen_buffer_size);
485#endif
4369415f 486#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
487 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
488 code_gen_buffer_max_size = code_gen_buffer_size -
489 code_gen_max_block_size();
490 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
491 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
492}
493
494/* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
496 size. */
497void cpu_exec_init_all(unsigned long tb_size)
498{
26a5f13b
FB
499 cpu_gen_init();
500 code_gen_alloc(tb_size);
501 code_gen_ptr = code_gen_buffer;
4369415f 502 page_init();
e2eef170 503#if !defined(CONFIG_USER_ONLY)
26a5f13b 504 io_mem_init();
e2eef170 505#endif
26a5f13b
FB
506}
507
9656f324
PB
508#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
509
510#define CPU_COMMON_SAVE_VERSION 1
511
512static void cpu_common_save(QEMUFile *f, void *opaque)
513{
514 CPUState *env = opaque;
515
516 qemu_put_be32s(f, &env->halted);
517 qemu_put_be32s(f, &env->interrupt_request);
518}
519
520static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
521{
522 CPUState *env = opaque;
523
524 if (version_id != CPU_COMMON_SAVE_VERSION)
525 return -EINVAL;
526
527 qemu_get_be32s(f, &env->halted);
75f482ae 528 qemu_get_be32s(f, &env->interrupt_request);
9656f324
PB
529 tlb_flush(env, 1);
530
531 return 0;
532}
533#endif
534
6a00d601 535void cpu_exec_init(CPUState *env)
fd6ce8f6 536{
6a00d601
FB
537 CPUState **penv;
538 int cpu_index;
539
6a00d601
FB
540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
546 }
547 env->cpu_index = cpu_index;
c0ce998e
AL
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
6a00d601 550 *penv = env;
b3c7724c 551#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
552 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
553 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
554 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
555 cpu_save, cpu_load, env);
556#endif
fd6ce8f6
FB
557}
558
9fa3e853
FB
559static inline void invalidate_page_bitmap(PageDesc *p)
560{
561 if (p->code_bitmap) {
59817ccb 562 qemu_free(p->code_bitmap);
9fa3e853
FB
563 p->code_bitmap = NULL;
564 }
565 p->code_write_count = 0;
566}
567
fd6ce8f6
FB
568/* set to NULL all the 'first_tb' fields in all PageDescs */
569static void page_flush_tb(void)
570{
571 int i, j;
572 PageDesc *p;
573
574 for(i = 0; i < L1_SIZE; i++) {
575 p = l1_map[i];
576 if (p) {
9fa3e853
FB
577 for(j = 0; j < L2_SIZE; j++) {
578 p->first_tb = NULL;
579 invalidate_page_bitmap(p);
580 p++;
581 }
fd6ce8f6
FB
582 }
583 }
584}
585
586/* flush all the translation blocks */
d4e8164f 587/* XXX: tb_flush is currently not thread safe */
6a00d601 588void tb_flush(CPUState *env1)
fd6ce8f6 589{
6a00d601 590 CPUState *env;
0124311e 591#if defined(DEBUG_FLUSH)
ab3d1727
BS
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr - code_gen_buffer),
594 nb_tbs, nb_tbs > 0 ?
595 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 596#endif
26a5f13b 597 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
598 cpu_abort(env1, "Internal error: code buffer overflow\n");
599
fd6ce8f6 600 nb_tbs = 0;
3b46e624 601
6a00d601
FB
602 for(env = first_cpu; env != NULL; env = env->next_cpu) {
603 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
604 }
9fa3e853 605
8a8a608f 606 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 607 page_flush_tb();
9fa3e853 608
fd6ce8f6 609 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
610 /* XXX: flush processor icache at this point if cache flush is
611 expensive */
e3db7226 612 tb_flush_count++;
fd6ce8f6
FB
613}
614
615#ifdef DEBUG_TB_CHECK
616
bc98a7ef 617static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
618{
619 TranslationBlock *tb;
620 int i;
621 address &= TARGET_PAGE_MASK;
99773bd4
PB
622 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
623 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
624 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
625 address >= tb->pc + tb->size)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 627 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
628 }
629 }
630 }
631}
632
633/* verify that all the pages have correct rights for code */
634static void tb_page_check(void)
635{
636 TranslationBlock *tb;
637 int i, flags1, flags2;
3b46e624 638
99773bd4
PB
639 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
640 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
641 flags1 = page_get_flags(tb->pc);
642 flags2 = page_get_flags(tb->pc + tb->size - 1);
643 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 645 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
646 }
647 }
648 }
649}
650
bdaf78e0 651static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
652{
653 TranslationBlock *tb1;
654 unsigned int n1;
655
656 /* suppress any remaining jumps to this TB */
657 tb1 = tb->jmp_first;
658 for(;;) {
659 n1 = (long)tb1 & 3;
660 tb1 = (TranslationBlock *)((long)tb1 & ~3);
661 if (n1 == 2)
662 break;
663 tb1 = tb1->jmp_next[n1];
664 }
665 /* check end of list */
666 if (tb1 != tb) {
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
668 }
669}
670
fd6ce8f6
FB
671#endif
672
673/* invalidate one TB */
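/* unlink 'tb' from the singly linked list starting at *ptb; the list's
   'next' pointer is stored at byte offset 'next_offset' inside each TB */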
674static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
675 int next_offset)
676{
677 TranslationBlock *tb1;
678 for(;;) {
679 tb1 = *ptb;
680 if (tb1 == tb) {
681 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
682 break;
683 }
684 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
685 }
686}
687
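/* remove 'tb' from a per-page TB list; the low 2 bits of each list
   pointer encode which of the TB's (up to) two pages the link is for */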
688static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
689{
690 TranslationBlock *tb1;
691 unsigned int n1;
692
693 for(;;) {
694 tb1 = *ptb;
695 n1 = (long)tb1 & 3;
696 tb1 = (TranslationBlock *)((long)tb1 & ~3);
697 if (tb1 == tb) {
698 *ptb = tb1->page_next[n1];
699 break;
700 }
701 ptb = &tb1->page_next[n1];
702 }
703}
704
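/* unlink jump 'n' of 'tb' from the circular list of TBs that jump to
   the same destination TB */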
705static inline void tb_jmp_remove(TranslationBlock *tb, int n)
706{
707 TranslationBlock *tb1, **ptb;
708 unsigned int n1;
709
710 ptb = &tb->jmp_next[n];
711 tb1 = *ptb;
712 if (tb1) {
713 /* find tb(n) in circular list */
714 for(;;) {
715 tb1 = *ptb;
716 n1 = (long)tb1 & 3;
717 tb1 = (TranslationBlock *)((long)tb1 & ~3);
718 if (n1 == n && tb1 == tb)
719 break;
720 if (n1 == 2) {
721 ptb = &tb1->jmp_first;
722 } else {
723 ptb = &tb1->jmp_next[n1];
724 }
725 }
726 /* now we can suppress tb(n) from the list */
727 *ptb = tb->jmp_next[n];
728
729 tb->jmp_next[n] = NULL;
730 }
731}
732
733/* reset the jump entry 'n' of a TB so that it is not chained to
734 another TB */
735static inline void tb_reset_jump(TranslationBlock *tb, int n)
736{
737 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
738}
739
2e70f6ef 740void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 741{
6a00d601 742 CPUState *env;
8a40a180 743 PageDesc *p;
d4e8164f 744 unsigned int h, n1;
00f82b8a 745 target_phys_addr_t phys_pc;
8a40a180 746 TranslationBlock *tb1, *tb2;
3b46e624 747
8a40a180
FB
748 /* remove the TB from the hash list */
749 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
750 h = tb_phys_hash_func(phys_pc);
5fafdf24 751 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
752 offsetof(TranslationBlock, phys_hash_next));
753
754 /* remove the TB from the page list */
755 if (tb->page_addr[0] != page_addr) {
756 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
757 tb_page_remove(&p->first_tb, tb);
758 invalidate_page_bitmap(p);
759 }
760 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
761 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
762 tb_page_remove(&p->first_tb, tb);
763 invalidate_page_bitmap(p);
764 }
765
36bdbe54 766 tb_invalidated_flag = 1;
59817ccb 767
fd6ce8f6 768 /* remove the TB from the hash list */
8a40a180 769 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
770 for(env = first_cpu; env != NULL; env = env->next_cpu) {
771 if (env->tb_jmp_cache[h] == tb)
772 env->tb_jmp_cache[h] = NULL;
773 }
d4e8164f
FB
774
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb, 0);
777 tb_jmp_remove(tb, 1);
778
779 /* suppress any remaining jumps to this TB */
780 tb1 = tb->jmp_first;
781 for(;;) {
782 n1 = (long)tb1 & 3;
783 if (n1 == 2)
784 break;
785 tb1 = (TranslationBlock *)((long)tb1 & ~3);
786 tb2 = tb1->jmp_next[n1];
787 tb_reset_jump(tb1, n1);
788 tb1->jmp_next[n1] = NULL;
789 tb1 = tb2;
790 }
791 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 792
e3db7226 793 tb_phys_invalidate_count++;
9fa3e853
FB
794}
795
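/* set bits [start, start + len) in the bitmap 'tab' (8 bits per bitmap byte) */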
796static inline void set_bits(uint8_t *tab, int start, int len)
797{
798 int end, mask, end1;
799
800 end = start + len;
801 tab += start >> 3;
802 mask = 0xff << (start & 7);
803 if ((start & ~7) == (end & ~7)) {
804 if (start < end) {
805 mask &= ~(0xff << (end & 7));
806 *tab |= mask;
807 }
808 } else {
809 *tab++ |= mask;
810 start = (start + 8) & ~7;
811 end1 = end & ~7;
812 while (start < end1) {
813 *tab++ = 0xff;
814 start += 8;
815 }
816 if (start < end) {
817 mask = ~(0xff << (end & 7));
818 *tab |= mask;
819 }
820 }
821}
822
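/* build the code bitmap of a page: one bit per byte of the page that is
   covered by a translated block */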
823static void build_page_bitmap(PageDesc *p)
824{
825 int n, tb_start, tb_end;
826 TranslationBlock *tb;
3b46e624 827
b2a7081a 828 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
829 if (!p->code_bitmap)
830 return;
9fa3e853
FB
831
832 tb = p->first_tb;
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
844 } else {
845 tb_start = 0;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
847 }
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
850 }
851}
852
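/* generate a new TB for (pc, cs_base, flags); if TB allocation fails, the
   whole translation cache is flushed and the allocation is retried */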
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
d720b93d
FB
856{
857 TranslationBlock *tb;
858 uint8_t *tc_ptr;
859 target_ulong phys_pc, phys_page2, virt_page2;
860 int code_gen_size;
861
c27004ec
FB
862 phys_pc = get_phys_addr_code(env, pc);
863 tb = tb_alloc(pc);
d720b93d
FB
864 if (!tb) {
865 /* flush must be done */
866 tb_flush(env);
867 /* cannot fail at this point */
c27004ec 868 tb = tb_alloc(pc);
2e70f6ef
PB
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
d720b93d
FB
871 }
872 tc_ptr = code_gen_ptr;
873 tb->tc_ptr = tc_ptr;
874 tb->cs_base = cs_base;
875 tb->flags = flags;
876 tb->cflags = cflags;
d07bde88 877 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 879
d720b93d 880 /* check next page if needed */
c27004ec 881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 882 phys_page2 = -1;
c27004ec 883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
884 phys_page2 = get_phys_addr_code(env, virt_page2);
885 }
886 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 887 return tb;
d720b93d 888}
3b46e624 889
9fa3e853
FB
890/* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
00f82b8a 895void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
896 int is_cpu_write_access)
897{
6b917547 898 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 899 CPUState *env = cpu_single_env;
9fa3e853 900 target_ulong tb_start, tb_end;
6b917547
AL
901 PageDesc *p;
902 int n;
903#ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
911
912 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 913 if (!p)
9fa3e853 914 return;
5fafdf24 915 if (!p->code_bitmap &&
d720b93d
FB
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
9fa3e853
FB
918 /* build code bitmap */
919 build_page_bitmap(p);
920 }
921
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
924 tb = p->first_tb;
925 while (tb != NULL) {
926 n = (long)tb & 3;
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
930 if (n == 0) {
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
935 } else {
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938 }
939 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
940#ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
943 current_tb = NULL;
2e70f6ef 944 if (env->mem_io_pc) {
d720b93d 945 /* now we have a real cpu fault */
2e70f6ef 946 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
947 }
948 }
949 if (current_tb == tb &&
2e70f6ef 950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
3b46e624 956
d720b93d 957 current_tb_modified = 1;
5fafdf24 958 cpu_restore_state(current_tb, env,
2e70f6ef 959 env->mem_io_pc, NULL);
6b917547
AL
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 &current_flags);
d720b93d
FB
962 }
963#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
966 saved_tb = NULL;
967 if (env) {
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
970 }
9fa3e853 971 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
972 if (env) {
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
976 }
9fa3e853
FB
977 }
978 tb = tb_next;
979 }
980#if !defined(CONFIG_USER_ONLY)
981 /* if no code remaining, no need to continue to use slow writes */
982 if (!p->first_tb) {
983 invalidate_page_bitmap(p);
d720b93d 984 if (is_cpu_write_access) {
2e70f6ef 985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
986 }
987 }
988#endif
989#ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
993 itself */
ea1c1802 994 env->current_tb = NULL;
2e70f6ef 995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 996 cpu_resume_from_signal(env, NULL);
9fa3e853 997 }
fd6ce8f6 998#endif
9fa3e853 999}
fd6ce8f6 1000
9fa3e853 1001/* len must be <= 8 and start must be a multiple of len */
00f82b8a 1002static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
1003{
1004 PageDesc *p;
1005 int offset, b;
59817ccb 1006#if 0
a4193c8a
FB
1007 if (1) {
1008 if (loglevel) {
5fafdf24 1009 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 1010 cpu_single_env->mem_io_vaddr, len,
5fafdf24 1011 cpu_single_env->eip,
a4193c8a
FB
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1013 }
59817ccb
FB
1014 }
1015#endif
9fa3e853 1016 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1017 if (!p)
9fa3e853
FB
1018 return;
1019 if (p->code_bitmap) {
1020 offset = start & ~TARGET_PAGE_MASK;
1021 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1022 if (b & ((1 << len) - 1))
1023 goto do_invalidate;
1024 } else {
1025 do_invalidate:
d720b93d 1026 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1027 }
1028}
1029
9fa3e853 1030#if !defined(CONFIG_SOFTMMU)
00f82b8a 1031static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1032 unsigned long pc, void *puc)
9fa3e853 1033{
6b917547 1034 TranslationBlock *tb;
9fa3e853 1035 PageDesc *p;
6b917547 1036 int n;
d720b93d 1037#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1038 TranslationBlock *current_tb = NULL;
d720b93d 1039 CPUState *env = cpu_single_env;
6b917547
AL
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
d720b93d 1044#endif
9fa3e853
FB
1045
1046 addr &= TARGET_PAGE_MASK;
1047 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1048 if (!p)
9fa3e853
FB
1049 return;
1050 tb = p->first_tb;
d720b93d
FB
1051#ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb && pc != 0) {
1053 current_tb = tb_find_pc(pc);
1054 }
1055#endif
9fa3e853
FB
1056 while (tb != NULL) {
1057 n = (long)tb & 3;
1058 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1059#ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb == tb &&
2e70f6ef 1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
3b46e624 1067
d720b93d
FB
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1070 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 &current_flags);
d720b93d
FB
1072 }
1073#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1074 tb_phys_invalidate(tb, addr);
1075 tb = tb->page_next[n];
1076 }
fd6ce8f6 1077 p->first_tb = NULL;
d720b93d
FB
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1082 itself */
ea1c1802 1083 env->current_tb = NULL;
2e70f6ef 1084 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1085 cpu_resume_from_signal(env, puc);
1086 }
1087#endif
fd6ce8f6 1088}
9fa3e853 1089#endif
fd6ce8f6
FB
1090
1091/* add the tb in the target page and protect it if necessary */
5fafdf24 1092static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1093 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1094{
1095 PageDesc *p;
9fa3e853
FB
1096 TranslationBlock *last_first_tb;
1097
1098 tb->page_addr[n] = page_addr;
3a7d929e 1099 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1100 tb->page_next[n] = p->first_tb;
1101 last_first_tb = p->first_tb;
1102 p->first_tb = (TranslationBlock *)((long)tb | n);
1103 invalidate_page_bitmap(p);
fd6ce8f6 1104
107db443 1105#if defined(TARGET_HAS_SMC) || 1
d720b93d 1106
9fa3e853 1107#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1108 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1109 target_ulong addr;
1110 PageDesc *p2;
9fa3e853
FB
1111 int prot;
1112
fd6ce8f6
FB
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
53a5960a 1115 page_addr &= qemu_host_page_mask;
fd6ce8f6 1116 prot = 0;
53a5960a
PB
1117 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1118 addr += TARGET_PAGE_SIZE) {
1119
1120 p2 = page_find (addr >> TARGET_PAGE_BITS);
1121 if (!p2)
1122 continue;
1123 prot |= p2->flags;
1124 p2->flags &= ~PAGE_WRITE;
1125 page_get_flags(addr);
1126 }
5fafdf24 1127 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1128 (prot & PAGE_BITS) & ~PAGE_WRITE);
1129#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1130 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1131 page_addr);
fd6ce8f6 1132#endif
fd6ce8f6 1133 }
9fa3e853
FB
1134#else
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb) {
6a00d601 1139 tlb_protect_code(page_addr);
9fa3e853
FB
1140 }
1141#endif
d720b93d
FB
1142
1143#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1144}
1145
1146/* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
c27004ec 1148TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1149{
1150 TranslationBlock *tb;
fd6ce8f6 1151
26a5f13b
FB
1152 if (nb_tbs >= code_gen_max_blocks ||
1153 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1154 return NULL;
fd6ce8f6
FB
1155 tb = &tbs[nb_tbs++];
1156 tb->pc = pc;
b448f2f3 1157 tb->cflags = 0;
d4e8164f
FB
1158 return tb;
1159}
1160
2e70f6ef
PB
1161void tb_free(TranslationBlock *tb)
1162{
bf20dc07 1163 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1167 code_gen_ptr = tb->tc_ptr;
1168 nb_tbs--;
1169 }
1170}
1171
9fa3e853
FB
1172/* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
5fafdf24 1174void tb_link_phys(TranslationBlock *tb,
9fa3e853 1175 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1176{
9fa3e853
FB
1177 unsigned int h;
1178 TranslationBlock **ptb;
1179
c8a706fe
PB
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1182 mmap_lock();
9fa3e853
FB
1183 /* add in the physical hash table */
1184 h = tb_phys_hash_func(phys_pc);
1185 ptb = &tb_phys_hash[h];
1186 tb->phys_hash_next = *ptb;
1187 *ptb = tb;
fd6ce8f6
FB
1188
1189 /* add in the page list */
9fa3e853
FB
1190 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1191 if (phys_page2 != -1)
1192 tb_alloc_page(tb, 1, phys_page2);
1193 else
1194 tb->page_addr[1] = -1;
9fa3e853 1195
d4e8164f
FB
1196 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1197 tb->jmp_next[0] = NULL;
1198 tb->jmp_next[1] = NULL;
1199
1200 /* init original jump addresses */
1201 if (tb->tb_next_offset[0] != 0xffff)
1202 tb_reset_jump(tb, 0);
1203 if (tb->tb_next_offset[1] != 0xffff)
1204 tb_reset_jump(tb, 1);
8a40a180
FB
1205
1206#ifdef DEBUG_TB_CHECK
1207 tb_page_check();
1208#endif
c8a706fe 1209 mmap_unlock();
fd6ce8f6
FB
1210}
1211
9fa3e853
FB
1212/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1215{
9fa3e853
FB
1216 int m_min, m_max, m;
1217 unsigned long v;
1218 TranslationBlock *tb;
a513fe19
FB
1219
1220 if (nb_tbs <= 0)
1221 return NULL;
1222 if (tc_ptr < (unsigned long)code_gen_buffer ||
1223 tc_ptr >= (unsigned long)code_gen_ptr)
1224 return NULL;
1225 /* binary search (cf Knuth) */
1226 m_min = 0;
1227 m_max = nb_tbs - 1;
1228 while (m_min <= m_max) {
1229 m = (m_min + m_max) >> 1;
1230 tb = &tbs[m];
1231 v = (unsigned long)tb->tc_ptr;
1232 if (v == tc_ptr)
1233 return tb;
1234 else if (tc_ptr < v) {
1235 m_max = m - 1;
1236 } else {
1237 m_min = m + 1;
1238 }
5fafdf24 1239 }
a513fe19
FB
1240 return &tbs[m_max];
1241}
7501267e 1242
ea041c0e
FB
1243static void tb_reset_jump_recursive(TranslationBlock *tb);
1244
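/* unchain jump 'n' of 'tb': remove tb from the incoming-jump list of the
   destination TB, reset the patched jump, then recurse into the destination */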
1245static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1246{
1247 TranslationBlock *tb1, *tb_next, **ptb;
1248 unsigned int n1;
1249
1250 tb1 = tb->jmp_next[n];
1251 if (tb1 != NULL) {
1252 /* find head of list */
1253 for(;;) {
1254 n1 = (long)tb1 & 3;
1255 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 if (n1 == 2)
1257 break;
1258 tb1 = tb1->jmp_next[n1];
1259 }
1260 /* we are now sure now that tb jumps to tb1 */
1261 tb_next = tb1;
1262
1263 /* remove tb from the jmp_first list */
1264 ptb = &tb_next->jmp_first;
1265 for(;;) {
1266 tb1 = *ptb;
1267 n1 = (long)tb1 & 3;
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1269 if (n1 == n && tb1 == tb)
1270 break;
1271 ptb = &tb1->jmp_next[n1];
1272 }
1273 *ptb = tb->jmp_next[n];
1274 tb->jmp_next[n] = NULL;
3b46e624 1275
ea041c0e
FB
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb, n);
1278
0124311e 1279 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1280 tb_reset_jump_recursive(tb_next);
1281 }
1282}
1283
1284static void tb_reset_jump_recursive(TranslationBlock *tb)
1285{
1286 tb_reset_jump_recursive2(tb, 0);
1287 tb_reset_jump_recursive2(tb, 1);
1288}
1289
1fddef4b 1290#if defined(TARGET_HAS_ICE)
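/* invalidate any TB containing the guest physical page behind 'pc' so that
   an inserted or removed breakpoint takes effect */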
1291static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1292{
9b3c35e0
JM
1293 target_phys_addr_t addr;
1294 target_ulong pd;
c2f07f81
PB
1295 ram_addr_t ram_addr;
1296 PhysPageDesc *p;
d720b93d 1297
c2f07f81
PB
1298 addr = cpu_get_phys_page_debug(env, pc);
1299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1300 if (!p) {
1301 pd = IO_MEM_UNASSIGNED;
1302 } else {
1303 pd = p->phys_offset;
1304 }
1305 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1306 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1307}
c27004ec 1308#endif
d720b93d 1309
6658ffb8 1310/* Add a watchpoint. */
a1d1bb31
AL
1311int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1312 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1313{
b4051334 1314 target_ulong len_mask = ~(len - 1);
c0ce998e 1315 CPUWatchpoint *wp;
6658ffb8 1316
b4051334
AL
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1319 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 return -EINVAL;
1322 }
a1d1bb31
AL
1323 wp = qemu_malloc(sizeof(*wp));
1324 if (!wp)
426cd5d6 1325 return -ENOMEM;
a1d1bb31
AL
1326
1327 wp->vaddr = addr;
b4051334 1328 wp->len_mask = len_mask;
a1d1bb31
AL
1329 wp->flags = flags;
1330
2dc9f411 1331 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1332 if (flags & BP_GDB)
1333 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1334 else
1335 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1336
6658ffb8 1337 tlb_flush_page(env, addr);
a1d1bb31
AL
1338
1339 if (watchpoint)
1340 *watchpoint = wp;
1341 return 0;
6658ffb8
PB
1342}
1343
a1d1bb31
AL
1344/* Remove a specific watchpoint. */
1345int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 int flags)
6658ffb8 1347{
b4051334 1348 target_ulong len_mask = ~(len - 1);
a1d1bb31 1349 CPUWatchpoint *wp;
6658ffb8 1350
c0ce998e 1351 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1352 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1353 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1354 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1355 return 0;
1356 }
1357 }
a1d1bb31 1358 return -ENOENT;
6658ffb8
PB
1359}
1360
a1d1bb31
AL
1361/* Remove a specific watchpoint by reference. */
1362void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1363{
c0ce998e 1364 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1365
a1d1bb31
AL
1366 tlb_flush_page(env, watchpoint->vaddr);
1367
1368 qemu_free(watchpoint);
1369}
1370
1371/* Remove all matching watchpoints. */
1372void cpu_watchpoint_remove_all(CPUState *env, int mask)
1373{
c0ce998e 1374 CPUWatchpoint *wp, *next;
a1d1bb31 1375
c0ce998e 1376 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1377 if (wp->flags & mask)
1378 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1379 }
7d03f82f
EI
1380}
1381
a1d1bb31
AL
1382/* Add a breakpoint. */
1383int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1384 CPUBreakpoint **breakpoint)
4c3a88a2 1385{
1fddef4b 1386#if defined(TARGET_HAS_ICE)
c0ce998e 1387 CPUBreakpoint *bp;
3b46e624 1388
a1d1bb31
AL
1389 bp = qemu_malloc(sizeof(*bp));
1390 if (!bp)
426cd5d6 1391 return -ENOMEM;
4c3a88a2 1392
a1d1bb31
AL
1393 bp->pc = pc;
1394 bp->flags = flags;
1395
2dc9f411 1396 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1397 if (flags & BP_GDB)
1398 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1399 else
1400 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1401
d720b93d 1402 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1403
1404 if (breakpoint)
1405 *breakpoint = bp;
4c3a88a2
FB
1406 return 0;
1407#else
a1d1bb31 1408 return -ENOSYS;
4c3a88a2
FB
1409#endif
1410}
1411
a1d1bb31
AL
1412/* Remove a specific breakpoint. */
1413int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1414{
7d03f82f 1415#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1416 CPUBreakpoint *bp;
1417
c0ce998e 1418 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1419 if (bp->pc == pc && bp->flags == flags) {
1420 cpu_breakpoint_remove_by_ref(env, bp);
1421 return 0;
1422 }
7d03f82f 1423 }
a1d1bb31
AL
1424 return -ENOENT;
1425#else
1426 return -ENOSYS;
7d03f82f
EI
1427#endif
1428}
1429
a1d1bb31
AL
1430/* Remove a specific breakpoint by reference. */
1431void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1432{
1fddef4b 1433#if defined(TARGET_HAS_ICE)
c0ce998e 1434 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1435
a1d1bb31
AL
1436 breakpoint_invalidate(env, breakpoint->pc);
1437
1438 qemu_free(breakpoint);
1439#endif
1440}
1441
1442/* Remove all matching breakpoints. */
1443void cpu_breakpoint_remove_all(CPUState *env, int mask)
1444{
1445#if defined(TARGET_HAS_ICE)
c0ce998e 1446 CPUBreakpoint *bp, *next;
a1d1bb31 1447
c0ce998e 1448 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1449 if (bp->flags & mask)
1450 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1451 }
4c3a88a2
FB
1452#endif
1453}
1454
c33a346e
FB
1455/* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457void cpu_single_step(CPUState *env, int enabled)
1458{
1fddef4b 1459#if defined(TARGET_HAS_ICE)
c33a346e
FB
1460 if (env->singlestep_enabled != enabled) {
1461 env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1463 /* XXX: only flush what is necessary */
0124311e 1464 tb_flush(env);
c33a346e
FB
1465 }
1466#endif
1467}
1468
34865134
FB
1469/* enable or disable low levels log */
1470void cpu_set_log(int log_flags)
1471{
1472 loglevel = log_flags;
1473 if (loglevel && !logfile) {
11fcfab4 1474 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1475 if (!logfile) {
1476 perror(logfilename);
1477 _exit(1);
1478 }
9fa3e853
FB
1479#if !defined(CONFIG_SOFTMMU)
1480 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1481 {
b55266b5 1482 static char logfile_buf[4096];
9fa3e853
FB
1483 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1484 }
1485#else
34865134 1486 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1487#endif
e735b91c
PB
1488 log_append = 1;
1489 }
1490 if (!loglevel && logfile) {
1491 fclose(logfile);
1492 logfile = NULL;
34865134
FB
1493 }
1494}
1495
1496void cpu_set_log_filename(const char *filename)
1497{
1498 logfilename = strdup(filename);
e735b91c
PB
1499 if (logfile) {
1500 fclose(logfile);
1501 logfile = NULL;
1502 }
1503 cpu_set_log(loglevel);
34865134 1504}
c33a346e 1505
0124311e 1506/* mask must never be zero, except for A20 change call */
68a79315 1507void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1508{
d5975363 1509#if !defined(USE_NPTL)
ea041c0e 1510 TranslationBlock *tb;
15a51156 1511 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1512#endif
2e70f6ef 1513 int old_mask;
59817ccb 1514
2e70f6ef 1515 old_mask = env->interrupt_request;
d5975363 1516 /* FIXME: This is probably not threadsafe. A different thread could
bf20dc07 1517 be in the middle of a read-modify-write operation. */
68a79315 1518 env->interrupt_request |= mask;
d5975363
PB
1519#if defined(USE_NPTL)
1520 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1521 problem and hope the cpu will stop of its own accord. For userspace
1522 emulation this often isn't actually as bad as it sounds. Often
1523 signals are used primarily to interrupt blocking syscalls. */
1524#else
2e70f6ef 1525 if (use_icount) {
266910c4 1526 env->icount_decr.u16.high = 0xffff;
2e70f6ef
PB
1527#ifndef CONFIG_USER_ONLY
1528 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1529 an async event happened and we need to process it. */
1530 if (!can_do_io(env)
1531 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1532 cpu_abort(env, "Raised interrupt while not in I/O function");
1533 }
1534#endif
1535 } else {
1536 tb = env->current_tb;
1537 /* if the cpu is currently executing code, we must unlink it and
1538 all the potentially executing TB */
1539 if (tb && !testandset(&interrupt_lock)) {
1540 env->current_tb = NULL;
1541 tb_reset_jump_recursive(tb);
1542 resetlock(&interrupt_lock);
1543 }
ea041c0e 1544 }
d5975363 1545#endif
ea041c0e
FB
1546}
1547
b54ad049
FB
1548void cpu_reset_interrupt(CPUState *env, int mask)
1549{
1550 env->interrupt_request &= ~mask;
1551}
1552
c7cd6a37 1553const CPULogItem cpu_log_items[] = {
5fafdf24 1554 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1555 "show generated host assembly code for each compiled TB" },
1556 { CPU_LOG_TB_IN_ASM, "in_asm",
1557 "show target assembly code for each compiled TB" },
5fafdf24 1558 { CPU_LOG_TB_OP, "op",
57fec1fe 1559 "show micro ops for each compiled TB" },
f193c797 1560 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1561 "show micro ops "
1562#ifdef TARGET_I386
1563 "before eflags optimization and "
f193c797 1564#endif
e01a1157 1565 "after liveness analysis" },
f193c797
FB
1566 { CPU_LOG_INT, "int",
1567 "show interrupts/exceptions in short format" },
1568 { CPU_LOG_EXEC, "exec",
1569 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1570 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1571 "show CPU state before block translation" },
f193c797
FB
1572#ifdef TARGET_I386
1573 { CPU_LOG_PCALL, "pcall",
1574 "show protected mode far calls/returns/exceptions" },
1575#endif
8e3a9fd2 1576#ifdef DEBUG_IOPORT
fd872598
FB
1577 { CPU_LOG_IOPORT, "ioport",
1578 "show all i/o ports accesses" },
8e3a9fd2 1579#endif
f193c797
FB
1580 { 0, NULL, NULL },
1581};
1582
1583static int cmp1(const char *s1, int n, const char *s2)
1584{
1585 if (strlen(s2) != n)
1586 return 0;
1587 return memcmp(s1, s2, n) == 0;
1588}
3b46e624 1589
f193c797
FB
1590/* takes a comma separated list of log masks. Return 0 if error. */
1591int cpu_str_to_log_mask(const char *str)
1592{
c7cd6a37 1593 const CPULogItem *item;
f193c797
FB
1594 int mask;
1595 const char *p, *p1;
1596
1597 p = str;
1598 mask = 0;
1599 for(;;) {
1600 p1 = strchr(p, ',');
1601 if (!p1)
1602 p1 = p + strlen(p);
8e3a9fd2
FB
1603 if(cmp1(p,p1-p,"all")) {
1604 for(item = cpu_log_items; item->mask != 0; item++) {
1605 mask |= item->mask;
1606 }
1607 } else {
f193c797
FB
1608 for(item = cpu_log_items; item->mask != 0; item++) {
1609 if (cmp1(p, p1 - p, item->name))
1610 goto found;
1611 }
1612 return 0;
8e3a9fd2 1613 }
f193c797
FB
1614 found:
1615 mask |= item->mask;
1616 if (*p1 != ',')
1617 break;
1618 p = p1 + 1;
1619 }
1620 return mask;
1621}
ea041c0e 1622
7501267e
FB
1623void cpu_abort(CPUState *env, const char *fmt, ...)
1624{
1625 va_list ap;
493ae1f0 1626 va_list ap2;
7501267e
FB
1627
1628 va_start(ap, fmt);
493ae1f0 1629 va_copy(ap2, ap);
7501267e
FB
1630 fprintf(stderr, "qemu: fatal: ");
1631 vfprintf(stderr, fmt, ap);
1632 fprintf(stderr, "\n");
1633#ifdef TARGET_I386
7fe48483
FB
1634 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1635#else
1636 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1637#endif
924edcae 1638 if (logfile) {
f9373291 1639 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1640 vfprintf(logfile, fmt, ap2);
f9373291
JM
1641 fprintf(logfile, "\n");
1642#ifdef TARGET_I386
1643 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1644#else
1645 cpu_dump_state(env, logfile, fprintf, 0);
1646#endif
924edcae
AZ
1647 fflush(logfile);
1648 fclose(logfile);
1649 }
493ae1f0 1650 va_end(ap2);
f9373291 1651 va_end(ap);
7501267e
FB
1652 abort();
1653}
1654
c5be9f08
TS
1655CPUState *cpu_copy(CPUState *env)
1656{
01ba9816 1657 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1658 /* preserve chaining and index */
1659 CPUState *next_cpu = new_env->next_cpu;
1660 int cpu_index = new_env->cpu_index;
1661 memcpy(new_env, env, sizeof(CPUState));
1662 new_env->next_cpu = next_cpu;
1663 new_env->cpu_index = cpu_index;
1664 return new_env;
1665}
1666
0124311e
FB
1667#if !defined(CONFIG_USER_ONLY)
1668
5c751e99
EI
1669static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1670{
1671 unsigned int i;
1672
1673 /* Discard jump cache entries for any tb which might potentially
1674 overlap the flushed page. */
1675 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1676 memset (&env->tb_jmp_cache[i], 0,
1677 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1678
1679 i = tb_jmp_cache_hash_page(addr);
1680 memset (&env->tb_jmp_cache[i], 0,
1681 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1682}
1683
ee8b7021
FB
1684/* NOTE: if flush_global is true, also flush global entries (not
1685 implemented yet) */
1686void tlb_flush(CPUState *env, int flush_global)
33417e70 1687{
33417e70 1688 int i;
0124311e 1689
9fa3e853
FB
1690#if defined(DEBUG_TLB)
1691 printf("tlb_flush:\n");
1692#endif
0124311e
FB
1693 /* must reset current TB so that interrupts cannot modify the
1694 links while we are modifying them */
1695 env->current_tb = NULL;
1696
33417e70 1697 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1698 env->tlb_table[0][i].addr_read = -1;
1699 env->tlb_table[0][i].addr_write = -1;
1700 env->tlb_table[0][i].addr_code = -1;
1701 env->tlb_table[1][i].addr_read = -1;
1702 env->tlb_table[1][i].addr_write = -1;
1703 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1704#if (NB_MMU_MODES >= 3)
1705 env->tlb_table[2][i].addr_read = -1;
1706 env->tlb_table[2][i].addr_write = -1;
1707 env->tlb_table[2][i].addr_code = -1;
1708#if (NB_MMU_MODES == 4)
1709 env->tlb_table[3][i].addr_read = -1;
1710 env->tlb_table[3][i].addr_write = -1;
1711 env->tlb_table[3][i].addr_code = -1;
1712#endif
1713#endif
33417e70 1714 }
9fa3e853 1715
8a40a180 1716 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1717
0a962c02
FB
1718#ifdef USE_KQEMU
1719 if (env->kqemu_enabled) {
1720 kqemu_flush(env, flush_global);
1721 }
9fa3e853 1722#endif
e3db7226 1723 tlb_flush_count++;
33417e70
FB
1724}
1725
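/* invalidate a single TLB entry if any of its read, write or code
   addresses matches the page at 'addr' */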
274da6b2 1726static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1727{
5fafdf24 1728 if (addr == (tlb_entry->addr_read &
84b7b8e7 1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1730 addr == (tlb_entry->addr_write &
84b7b8e7 1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1732 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1733 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1734 tlb_entry->addr_read = -1;
1735 tlb_entry->addr_write = -1;
1736 tlb_entry->addr_code = -1;
1737 }
61382a50
FB
1738}
1739
2e12669a 1740void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1741{
8a40a180 1742 int i;
0124311e 1743
9fa3e853 1744#if defined(DEBUG_TLB)
108c49b8 1745 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1746#endif
0124311e
FB
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env->current_tb = NULL;
61382a50
FB
1750
1751 addr &= TARGET_PAGE_MASK;
1752 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1753 tlb_flush_entry(&env->tlb_table[0][i], addr);
1754 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1755#if (NB_MMU_MODES >= 3)
1756 tlb_flush_entry(&env->tlb_table[2][i], addr);
1757#if (NB_MMU_MODES == 4)
1758 tlb_flush_entry(&env->tlb_table[3][i], addr);
1759#endif
1760#endif
0124311e 1761
5c751e99 1762 tlb_flush_jmp_cache(env, addr);
9fa3e853 1763
0a962c02
FB
1764#ifdef USE_KQEMU
1765 if (env->kqemu_enabled) {
1766 kqemu_flush_page(env, addr);
1767 }
1768#endif
9fa3e853
FB
1769}
1770
9fa3e853
FB
1771/* update the TLBs so that writes to code in the virtual page 'addr'
1772 can be detected */
6a00d601 1773static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1774{
5fafdf24 1775 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1776 ram_addr + TARGET_PAGE_SIZE,
1777 CODE_DIRTY_FLAG);
9fa3e853
FB
1778}
1779
9fa3e853 1780/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1781 tested for self-modifying code */
5fafdf24 1782static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1783 target_ulong vaddr)
9fa3e853 1784{
3a7d929e 1785 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1786}
1787
5fafdf24 1788static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1789 unsigned long start, unsigned long length)
1790{
1791 unsigned long addr;
84b7b8e7
FB
1792 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1793 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1794 if ((addr - start) < length) {
0f459d16 1795 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1796 }
1797 }
1798}
1799
3a7d929e 1800void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1801 int dirty_flags)
1ccde1cb
FB
1802{
1803 CPUState *env;
4f2ac237 1804 unsigned long length, start1;
0a962c02
FB
1805 int i, mask, len;
1806 uint8_t *p;
1ccde1cb
FB
1807
1808 start &= TARGET_PAGE_MASK;
1809 end = TARGET_PAGE_ALIGN(end);
1810
1811 length = end - start;
1812 if (length == 0)
1813 return;
0a962c02 1814 len = length >> TARGET_PAGE_BITS;
3a7d929e 1815#ifdef USE_KQEMU
6a00d601
FB
1816 /* XXX: should not depend on cpu context */
1817 env = first_cpu;
3a7d929e 1818 if (env->kqemu_enabled) {
f23db169
FB
1819 ram_addr_t addr;
1820 addr = start;
1821 for(i = 0; i < len; i++) {
1822 kqemu_set_notdirty(env, addr);
1823 addr += TARGET_PAGE_SIZE;
1824 }
3a7d929e
FB
1825 }
1826#endif
f23db169
FB
1827 mask = ~dirty_flags;
1828 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1829 for(i = 0; i < len; i++)
1830 p[i] &= mask;
1831
1ccde1cb
FB
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
59817ccb 1834 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1837 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1838 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1839 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1840#if (NB_MMU_MODES >= 3)
1841 for(i = 0; i < CPU_TLB_SIZE; i++)
1842 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1843#if (NB_MMU_MODES == 4)
1844 for(i = 0; i < CPU_TLB_SIZE; i++)
1845 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1846#endif
1847#endif
6a00d601 1848 }
1ccde1cb
FB
1849}
1850
74576198
AL
1851int cpu_physical_memory_set_dirty_tracking(int enable)
1852{
1853 in_migration = enable;
1854 return 0;
1855}
1856
1857int cpu_physical_memory_get_dirty_tracking(void)
1858{
1859 return in_migration;
1860}
1861
2bec46dc
AL
1862void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1863{
1864 if (kvm_enabled())
1865 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1866}
1867
3a7d929e
FB
1868static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1869{
1870 ram_addr_t ram_addr;
1871
84b7b8e7 1872 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1873 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1874 tlb_entry->addend - (unsigned long)phys_ram_base;
1875 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1876 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1877 }
1878 }
1879}
1880
1881/* update the TLB according to the current state of the dirty bits */
1882void cpu_tlb_update_dirty(CPUState *env)
1883{
1884 int i;
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1886 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1887 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1888 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1889#if (NB_MMU_MODES >= 3)
1890 for(i = 0; i < CPU_TLB_SIZE; i++)
1891 tlb_update_dirty(&env->tlb_table[2][i]);
1892#if (NB_MMU_MODES == 4)
1893 for(i = 0; i < CPU_TLB_SIZE; i++)
1894 tlb_update_dirty(&env->tlb_table[3][i]);
1895#endif
1896#endif
3a7d929e
FB
1897}
1898
0f459d16 1899static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1900{
0f459d16
PB
1901 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1902 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1903}
1904
0f459d16
PB
1905/* update the TLB corresponding to virtual page vaddr
1906 so that it is no longer dirty */
1907static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1908{
1ccde1cb
FB
1909 int i;
1910
0f459d16 1911 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1912 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1913 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1914 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1915#if (NB_MMU_MODES >= 3)
0f459d16 1916 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1917#if (NB_MMU_MODES == 4)
0f459d16 1918 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1919#endif
1920#endif
9fa3e853
FB
1921}
1922
59817ccb
FB
1923/* add a new TLB entry. At most one entry for a given virtual address
1924 is permitted. Return 0 if OK or 2 if the page could not be mapped
1925 (can only happen in non-SOFTMMU mode for I/O pages or pages
1926 conflicting with the host address space). */
5fafdf24
TS
1927int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1928 target_phys_addr_t paddr, int prot,
6ebbf390 1929 int mmu_idx, int is_softmmu)
9fa3e853 1930{
92e873b9 1931 PhysPageDesc *p;
4f2ac237 1932 unsigned long pd;
9fa3e853 1933 unsigned int index;
4f2ac237 1934 target_ulong address;
0f459d16 1935 target_ulong code_address;
108c49b8 1936 target_phys_addr_t addend;
9fa3e853 1937 int ret;
84b7b8e7 1938 CPUTLBEntry *te;
a1d1bb31 1939 CPUWatchpoint *wp;
0f459d16 1940 target_phys_addr_t iotlb;
9fa3e853 1941
92e873b9 1942 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1943 if (!p) {
1944 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1945 } else {
1946 pd = p->phys_offset;
9fa3e853
FB
1947 }
1948#if defined(DEBUG_TLB)
6ebbf390
JM
1949 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1950 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1951#endif
1952
1953 ret = 0;
0f459d16
PB
1954 address = vaddr;
1955 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1956 /* IO memory case (romd handled later) */
1957 address |= TLB_MMIO;
1958 }
1959 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1960 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1961 /* Normal RAM. */
1962 iotlb = pd & TARGET_PAGE_MASK;
1963 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1964 iotlb |= IO_MEM_NOTDIRTY;
1965 else
1966 iotlb |= IO_MEM_ROM;
1967 } else {
1968 /* IO handlers are currently passed a physical address.
1969 It would be nice to pass an offset from the base address
1970 of that region. This would avoid having to special case RAM,
1971 and avoid full address decoding in every device.
1972 We can't use the high bits of pd for this because
1973 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1974 iotlb = (pd & ~TARGET_PAGE_MASK);
1975 if (p) {
8da3ff18
PB
1976 iotlb += p->region_offset;
1977 } else {
1978 iotlb += paddr;
1979 }
0f459d16
PB
1980 }
1981
1982 code_address = address;
1983 /* Make accesses to pages with watchpoints go via the
1984 watchpoint trap routines. */
c0ce998e 1985 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 1986 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
1987 iotlb = io_mem_watch + paddr;
1988 /* TODO: The memory case can be optimized by not trapping
1989 reads of pages with a write breakpoint. */
1990 address |= TLB_MMIO;
6658ffb8 1991 }
0f459d16 1992 }
d79acba4 1993
0f459d16
PB
1994 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1995 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1996 te = &env->tlb_table[mmu_idx][index];
1997 te->addend = addend - vaddr;
1998 if (prot & PAGE_READ) {
1999 te->addr_read = address;
2000 } else {
2001 te->addr_read = -1;
2002 }
5c751e99 2003
0f459d16
PB
2004 if (prot & PAGE_EXEC) {
2005 te->addr_code = code_address;
2006 } else {
2007 te->addr_code = -1;
2008 }
2009 if (prot & PAGE_WRITE) {
2010 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2011 (pd & IO_MEM_ROMD)) {
2012 /* Write access calls the I/O callback. */
2013 te->addr_write = address | TLB_MMIO;
2014 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2015 !cpu_physical_memory_is_dirty(pd)) {
2016 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2017 } else {
0f459d16 2018 te->addr_write = address;
9fa3e853 2019 }
0f459d16
PB
2020 } else {
2021 te->addr_write = -1;
9fa3e853 2022 }
9fa3e853
FB
2023 return ret;
2024}
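
A minimal usage sketch, not part of exec.c: a target's MMU fault handler normally resolves the guest virtual address through its page tables and then installs the mapping with tlb_set_page_exec(). The helper name and the flat 1:1 translation below are illustrative assumptions only.

static int example_fill_identity(CPUState *env, target_ulong vaddr,
                                 int mmu_idx, int is_softmmu)
{
    /* Hypothetical: assume guest-virtual == guest-physical and full access;
       a real target derives paddr and prot from its page-table walk. */
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                             mmu_idx, is_softmmu);
}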
2025
0124311e
FB
2026#else
2027
ee8b7021 2028void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2029{
2030}
2031
2e12669a 2032void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2033{
2034}
2035
5fafdf24
TS
2036int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2037 target_phys_addr_t paddr, int prot,
6ebbf390 2038 int mmu_idx, int is_softmmu)
9fa3e853
FB
2039{
2040 return 0;
2041}
0124311e 2042
9fa3e853
FB
2043/* dump memory mappings */
2044void page_dump(FILE *f)
33417e70 2045{
9fa3e853
FB
2046 unsigned long start, end;
2047 int i, j, prot, prot1;
2048 PageDesc *p;
33417e70 2049
9fa3e853
FB
2050 fprintf(f, "%-8s %-8s %-8s %s\n",
2051 "start", "end", "size", "prot");
2052 start = -1;
2053 end = -1;
2054 prot = 0;
2055 for(i = 0; i <= L1_SIZE; i++) {
2056 if (i < L1_SIZE)
2057 p = l1_map[i];
2058 else
2059 p = NULL;
2060 for(j = 0;j < L2_SIZE; j++) {
2061 if (!p)
2062 prot1 = 0;
2063 else
2064 prot1 = p[j].flags;
2065 if (prot1 != prot) {
2066 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2067 if (start != -1) {
2068 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2069 start, end, end - start,
9fa3e853
FB
2070 prot & PAGE_READ ? 'r' : '-',
2071 prot & PAGE_WRITE ? 'w' : '-',
2072 prot & PAGE_EXEC ? 'x' : '-');
2073 }
2074 if (prot1 != 0)
2075 start = end;
2076 else
2077 start = -1;
2078 prot = prot1;
2079 }
2080 if (!p)
2081 break;
2082 }
33417e70 2083 }
33417e70
FB
2084}
2085
53a5960a 2086int page_get_flags(target_ulong address)
33417e70 2087{
9fa3e853
FB
2088 PageDesc *p;
2089
2090 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2091 if (!p)
9fa3e853
FB
2092 return 0;
2093 return p->flags;
2094}
2095
2096/* modify the flags of a page and invalidate the code if
2097 necessary. The flag PAGE_WRITE_ORG is set automatically
2098 depending on PAGE_WRITE */
53a5960a 2099void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2100{
2101 PageDesc *p;
53a5960a 2102 target_ulong addr;
9fa3e853 2103
c8a706fe 2104 /* mmap_lock should already be held. */
9fa3e853
FB
2105 start = start & TARGET_PAGE_MASK;
2106 end = TARGET_PAGE_ALIGN(end);
2107 if (flags & PAGE_WRITE)
2108 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2109 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2110 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2111 /* We may be called for host regions that are outside guest
2112 address space. */
2113 if (!p)
2114 return;
9fa3e853
FB
2115 /* if the write protection is set, then we invalidate the code
2116 inside */
5fafdf24 2117 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2118 (flags & PAGE_WRITE) &&
2119 p->first_tb) {
d720b93d 2120 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2121 }
2122 p->flags = flags;
2123 }
33417e70
FB
2124}
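
A usage sketch for page_set_flags(), assuming user-mode emulation; the helper and the guest_start/guest_len names are hypothetical. A guest mmap() emulation would record the new protection roughly like this, taking mmap_lock() as required by the comment above.

static void example_record_mmap(target_ulong guest_start, target_ulong guest_len)
{
    mmap_lock();
    /* Mark the range valid and fully accessible; PAGE_WRITE_ORG is added
       automatically by page_set_flags() because PAGE_WRITE is set. */
    page_set_flags(guest_start, guest_start + guest_len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
    mmap_unlock();
}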
2125
3d97b40b
TS
2126int page_check_range(target_ulong start, target_ulong len, int flags)
2127{
2128 PageDesc *p;
2129 target_ulong end;
2130 target_ulong addr;
2131
55f280c9
AZ
2132 if (start + len < start)
2133 /* we've wrapped around */
2134 return -1;
2135
3d97b40b
TS
2136 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2137 start = start & TARGET_PAGE_MASK;
2138
3d97b40b
TS
2139 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2140 p = page_find(addr >> TARGET_PAGE_BITS);
2141 if( !p )
2142 return -1;
2143 if( !(p->flags & PAGE_VALID) )
2144 return -1;
2145
dae3270c 2146 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2147 return -1;
dae3270c
FB
2148 if (flags & PAGE_WRITE) {
2149 if (!(p->flags & PAGE_WRITE_ORG))
2150 return -1;
2151 /* unprotect the page if it was put read-only because it
2152 contains translated code */
2153 if (!(p->flags & PAGE_WRITE)) {
2154 if (!page_unprotect(addr, 0, NULL))
2155 return -1;
2156 }
2157 return 0;
2158 }
3d97b40b
TS
2159 }
2160 return 0;
2161}
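
A small sketch of how the range check above is typically used, for example from a syscall emulation path; the helper is hypothetical.

static int example_buffer_is_readable(target_ulong guest_addr, target_ulong len)
{
    /* page_check_range() returns 0 when every page in the range is valid
       and has the requested access rights. */
    return page_check_range(guest_addr, len, PAGE_READ) == 0;
}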
2162
9fa3e853
FB
2163/* called from signal handler: invalidate the code and unprotect the
2164 page. Return TRUE if the fault was successfully handled. */
53a5960a 2165int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2166{
2167 unsigned int page_index, prot, pindex;
2168 PageDesc *p, *p1;
53a5960a 2169 target_ulong host_start, host_end, addr;
9fa3e853 2170
c8a706fe
PB
2171 /* Technically this isn't safe inside a signal handler. However we
2172 know this only ever happens in a synchronous SEGV handler, so in
2173 practice it seems to be ok. */
2174 mmap_lock();
2175
83fb7adf 2176 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2177 page_index = host_start >> TARGET_PAGE_BITS;
2178 p1 = page_find(page_index);
c8a706fe
PB
2179 if (!p1) {
2180 mmap_unlock();
9fa3e853 2181 return 0;
c8a706fe 2182 }
83fb7adf 2183 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2184 p = p1;
2185 prot = 0;
2186 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2187 prot |= p->flags;
2188 p++;
2189 }
2190 /* if the page was really writable, then we change its
2191 protection back to writable */
2192 if (prot & PAGE_WRITE_ORG) {
2193 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2194 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2195 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2196 (prot & PAGE_BITS) | PAGE_WRITE);
2197 p1[pindex].flags |= PAGE_WRITE;
2198 /* and since the content will be modified, we must invalidate
2199 the corresponding translated code. */
d720b93d 2200 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2201#ifdef DEBUG_TB_CHECK
2202 tb_invalidate_check(address);
2203#endif
c8a706fe 2204 mmap_unlock();
9fa3e853
FB
2205 return 1;
2206 }
2207 }
c8a706fe 2208 mmap_unlock();
9fa3e853
FB
2209 return 0;
2210}
2211
6a00d601
FB
2212static inline void tlb_set_dirty(CPUState *env,
2213 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2214{
2215}
9fa3e853
FB
2216#endif /* defined(CONFIG_USER_ONLY) */
2217
e2eef170 2218#if !defined(CONFIG_USER_ONLY)
8da3ff18 2219
db7b5426 2220static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2221 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2222static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2223 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2224#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2225 need_subpage) \
2226 do { \
2227 if (addr > start_addr) \
2228 start_addr2 = 0; \
2229 else { \
2230 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2231 if (start_addr2 > 0) \
2232 need_subpage = 1; \
2233 } \
2234 \
49e9fba2 2235 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2236 end_addr2 = TARGET_PAGE_SIZE - 1; \
2237 else { \
2238 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2239 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2240 need_subpage = 1; \
2241 } \
2242 } while (0)
2243
33417e70
FB
2244/* register physical memory. 'size' must be a multiple of the target
2245 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2246 io memory page. The address used when calling the IO function is
2247 the offset from the start of the region, plus region_offset. Both
2248 start_addr and region_offset are rounded down to a page boundary
2249 before calculating this offset. This should not be a problem unless
2250 the low bits of start_addr and region_offset differ. */
2251void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2252 ram_addr_t size,
2253 ram_addr_t phys_offset,
2254 ram_addr_t region_offset)
33417e70 2255{
108c49b8 2256 target_phys_addr_t addr, end_addr;
92e873b9 2257 PhysPageDesc *p;
9d42037b 2258 CPUState *env;
00f82b8a 2259 ram_addr_t orig_size = size;
db7b5426 2260 void *subpage;
33417e70 2261
da260249
FB
2262#ifdef USE_KQEMU
2263 /* XXX: should not depend on cpu context */
2264 env = first_cpu;
2265 if (env->kqemu_enabled) {
2266 kqemu_set_phys_mem(start_addr, size, phys_offset);
2267 }
2268#endif
7ba1e619
AL
2269 if (kvm_enabled())
2270 kvm_set_phys_mem(start_addr, size, phys_offset);
2271
8da3ff18 2272 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2273 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2274 end_addr = start_addr + (target_phys_addr_t)size;
2275 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2276 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2277 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2278 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2279 target_phys_addr_t start_addr2, end_addr2;
2280 int need_subpage = 0;
2281
2282 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2283 need_subpage);
4254fab8 2284 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2285 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2286 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2287 &p->phys_offset, orig_memory,
2288 p->region_offset);
db7b5426
BS
2289 } else {
2290 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2291 >> IO_MEM_SHIFT];
2292 }
8da3ff18
PB
2293 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2294 region_offset);
2295 p->region_offset = 0;
db7b5426
BS
2296 } else {
2297 p->phys_offset = phys_offset;
2298 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2299 (phys_offset & IO_MEM_ROMD))
2300 phys_offset += TARGET_PAGE_SIZE;
2301 }
2302 } else {
2303 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2304 p->phys_offset = phys_offset;
8da3ff18 2305 p->region_offset = region_offset;
db7b5426 2306 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2307 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2308 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2309 } else {
db7b5426
BS
2310 target_phys_addr_t start_addr2, end_addr2;
2311 int need_subpage = 0;
2312
2313 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2314 end_addr2, need_subpage);
2315
4254fab8 2316 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2317 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2318 &p->phys_offset, IO_MEM_UNASSIGNED,
2319 0);
db7b5426 2320 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2321 phys_offset, region_offset);
2322 p->region_offset = 0;
db7b5426
BS
2323 }
2324 }
2325 }
8da3ff18 2326 region_offset += TARGET_PAGE_SIZE;
33417e70 2327 }
3b46e624 2328
9d42037b
FB
2329 /* since each CPU stores ram addresses in its TLB cache, we must
2330 reset the modified entries */
2331 /* XXX: slow ! */
2332 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2333 tlb_flush(env, 1);
2334 }
33417e70
FB
2335}
2336
ba863458 2337/* XXX: temporary until new memory mapping API */
00f82b8a 2338ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2339{
2340 PhysPageDesc *p;
2341
2342 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2343 if (!p)
2344 return IO_MEM_UNASSIGNED;
2345 return p->phys_offset;
2346}
2347
e9a1ab19 2348/* XXX: better than nothing */
00f82b8a 2349ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2350{
2351 ram_addr_t addr;
7fb4fdcf 2352 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2353 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2354 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2355 abort();
2356 }
2357 addr = phys_ram_alloc_offset;
2358 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2359 return addr;
2360}
2361
2362void qemu_ram_free(ram_addr_t addr)
2363{
2364}
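
Taken together, a board model would allocate guest RAM and map it roughly as sketched below; the helper name and the 64 MB size are illustrative assumptions, and the real callers live in the hw/ machine code.

static void example_map_main_ram(void)
{
    /* qemu_ram_alloc() returns an offset into phys_ram_base; mapping it
       with IO_MEM_RAM (0) in the low bits marks the pages as plain RAM. */
    ram_addr_t ram_offset = qemu_ram_alloc(64 * 1024 * 1024);

    cpu_register_physical_memory_offset(0x00000000, 64 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);
}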
2365
a4193c8a 2366static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2367{
67d3b957 2368#ifdef DEBUG_UNASSIGNED
ab3d1727 2369 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2370#endif
e18231a3
BS
2371#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2372 do_unassigned_access(addr, 0, 0, 0, 1);
2373#endif
2374 return 0;
2375}
2376
2377static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2378{
2379#ifdef DEBUG_UNASSIGNED
2380 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2381#endif
2382#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2383 do_unassigned_access(addr, 0, 0, 0, 2);
2384#endif
2385 return 0;
2386}
2387
2388static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2389{
2390#ifdef DEBUG_UNASSIGNED
2391 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2392#endif
2393#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2394 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2395#endif
33417e70
FB
2396 return 0;
2397}
2398
a4193c8a 2399static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2400{
67d3b957 2401#ifdef DEBUG_UNASSIGNED
ab3d1727 2402 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2403#endif
e18231a3
BS
2404#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2405 do_unassigned_access(addr, 1, 0, 0, 1);
2406#endif
2407}
2408
2409static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2410{
2411#ifdef DEBUG_UNASSIGNED
2412 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2413#endif
2414#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2415 do_unassigned_access(addr, 1, 0, 0, 2);
2416#endif
2417}
2418
2419static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2420{
2421#ifdef DEBUG_UNASSIGNED
2422 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2423#endif
2424#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2425 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2426#endif
33417e70
FB
2427}
2428
2429static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2430 unassigned_mem_readb,
e18231a3
BS
2431 unassigned_mem_readw,
2432 unassigned_mem_readl,
33417e70
FB
2433};
2434
2435static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2436 unassigned_mem_writeb,
e18231a3
BS
2437 unassigned_mem_writew,
2438 unassigned_mem_writel,
33417e70
FB
2439};
2440
0f459d16
PB
2441static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2442 uint32_t val)
9fa3e853 2443{
3a7d929e 2444 int dirty_flags;
3a7d929e
FB
2445 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2446 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2447#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2448 tb_invalidate_phys_page_fast(ram_addr, 1);
2449 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2450#endif
3a7d929e 2451 }
0f459d16 2452 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2453#ifdef USE_KQEMU
2454 if (cpu_single_env->kqemu_enabled &&
2455 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2456 kqemu_modify_page(cpu_single_env, ram_addr);
2457#endif
f23db169
FB
2458 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2459 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2460 /* we remove the notdirty callback only if the code has been
2461 flushed */
2462 if (dirty_flags == 0xff)
2e70f6ef 2463 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2464}
2465
0f459d16
PB
2466static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2467 uint32_t val)
9fa3e853 2468{
3a7d929e 2469 int dirty_flags;
3a7d929e
FB
2470 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2471 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2472#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2473 tb_invalidate_phys_page_fast(ram_addr, 2);
2474 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2475#endif
3a7d929e 2476 }
0f459d16 2477 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2478#ifdef USE_KQEMU
2479 if (cpu_single_env->kqemu_enabled &&
2480 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2481 kqemu_modify_page(cpu_single_env, ram_addr);
2482#endif
f23db169
FB
2483 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2484 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2485 /* we remove the notdirty callback only if the code has been
2486 flushed */
2487 if (dirty_flags == 0xff)
2e70f6ef 2488 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2489}
2490
0f459d16
PB
2491static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2492 uint32_t val)
9fa3e853 2493{
3a7d929e 2494 int dirty_flags;
3a7d929e
FB
2495 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2496 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2497#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2498 tb_invalidate_phys_page_fast(ram_addr, 4);
2499 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2500#endif
3a7d929e 2501 }
0f459d16 2502 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2503#ifdef USE_KQEMU
2504 if (cpu_single_env->kqemu_enabled &&
2505 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2506 kqemu_modify_page(cpu_single_env, ram_addr);
2507#endif
f23db169
FB
2508 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2509 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2510 /* we remove the notdirty callback only if the code has been
2511 flushed */
2512 if (dirty_flags == 0xff)
2e70f6ef 2513 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2514}
2515
3a7d929e 2516static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2517 NULL, /* never used */
2518 NULL, /* never used */
2519 NULL, /* never used */
2520};
2521
1ccde1cb
FB
2522static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2523 notdirty_mem_writeb,
2524 notdirty_mem_writew,
2525 notdirty_mem_writel,
2526};
2527
0f459d16 2528/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2529static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2530{
2531 CPUState *env = cpu_single_env;
06d55cc1
AL
2532 target_ulong pc, cs_base;
2533 TranslationBlock *tb;
0f459d16 2534 target_ulong vaddr;
a1d1bb31 2535 CPUWatchpoint *wp;
06d55cc1 2536 int cpu_flags;
0f459d16 2537
06d55cc1
AL
2538 if (env->watchpoint_hit) {
2539 /* We re-entered the check after replacing the TB. Now raise
2540 * the debug interrupt so that it will trigger after the
2541 * current instruction. */
2542 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2543 return;
2544 }
2e70f6ef 2545 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2546 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2547 if ((vaddr == (wp->vaddr & len_mask) ||
2548 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2549 wp->flags |= BP_WATCHPOINT_HIT;
2550 if (!env->watchpoint_hit) {
2551 env->watchpoint_hit = wp;
2552 tb = tb_find_pc(env->mem_io_pc);
2553 if (!tb) {
2554 cpu_abort(env, "check_watchpoint: could not find TB for "
2555 "pc=%p", (void *)env->mem_io_pc);
2556 }
2557 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2558 tb_phys_invalidate(tb, -1);
2559 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2560 env->exception_index = EXCP_DEBUG;
2561 } else {
2562 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2563 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2564 }
2565 cpu_resume_from_signal(env, NULL);
06d55cc1 2566 }
6e140f28
AL
2567 } else {
2568 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2569 }
2570 }
2571}
2572
6658ffb8
PB
2573/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2574 so these check for a hit then pass through to the normal out-of-line
2575 phys routines. */
2576static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2577{
b4051334 2578 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2579 return ldub_phys(addr);
2580}
2581
2582static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2583{
b4051334 2584 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2585 return lduw_phys(addr);
2586}
2587
2588static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2589{
b4051334 2590 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2591 return ldl_phys(addr);
2592}
2593
6658ffb8
PB
2594static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2595 uint32_t val)
2596{
b4051334 2597 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2598 stb_phys(addr, val);
2599}
2600
2601static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2602 uint32_t val)
2603{
b4051334 2604 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2605 stw_phys(addr, val);
2606}
2607
2608static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2609 uint32_t val)
2610{
b4051334 2611 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2612 stl_phys(addr, val);
2613}
2614
2615static CPUReadMemoryFunc *watch_mem_read[3] = {
2616 watch_mem_readb,
2617 watch_mem_readw,
2618 watch_mem_readl,
2619};
2620
2621static CPUWriteMemoryFunc *watch_mem_write[3] = {
2622 watch_mem_writeb,
2623 watch_mem_writew,
2624 watch_mem_writel,
2625};
6658ffb8 2626
db7b5426
BS
2627static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2628 unsigned int len)
2629{
db7b5426
BS
2630 uint32_t ret;
2631 unsigned int idx;
2632
8da3ff18 2633 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2634#if defined(DEBUG_SUBPAGE)
2635 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2636 mmio, len, addr, idx);
2637#endif
8da3ff18
PB
2638 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2639 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2640
2641 return ret;
2642}
2643
2644static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2645 uint32_t value, unsigned int len)
2646{
db7b5426
BS
2647 unsigned int idx;
2648
8da3ff18 2649 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2650#if defined(DEBUG_SUBPAGE)
2651 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2652 mmio, len, addr, idx, value);
2653#endif
8da3ff18
PB
2654 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2655 addr + mmio->region_offset[idx][1][len],
2656 value);
db7b5426
BS
2657}
2658
2659static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2660{
2661#if defined(DEBUG_SUBPAGE)
2662 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2663#endif
2664
2665 return subpage_readlen(opaque, addr, 0);
2666}
2667
2668static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2669 uint32_t value)
2670{
2671#if defined(DEBUG_SUBPAGE)
2672 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2673#endif
2674 subpage_writelen(opaque, addr, value, 0);
2675}
2676
2677static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2678{
2679#if defined(DEBUG_SUBPAGE)
2680 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2681#endif
2682
2683 return subpage_readlen(opaque, addr, 1);
2684}
2685
2686static void subpage_writew (void *opaque, target_phys_addr_t addr,
2687 uint32_t value)
2688{
2689#if defined(DEBUG_SUBPAGE)
2690 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2691#endif
2692 subpage_writelen(opaque, addr, value, 1);
2693}
2694
2695static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2696{
2697#if defined(DEBUG_SUBPAGE)
2698 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699#endif
2700
2701 return subpage_readlen(opaque, addr, 2);
2702}
2703
2704static void subpage_writel (void *opaque,
2705 target_phys_addr_t addr, uint32_t value)
2706{
2707#if defined(DEBUG_SUBPAGE)
2708 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709#endif
2710 subpage_writelen(opaque, addr, value, 2);
2711}
2712
2713static CPUReadMemoryFunc *subpage_read[] = {
2714 &subpage_readb,
2715 &subpage_readw,
2716 &subpage_readl,
2717};
2718
2719static CPUWriteMemoryFunc *subpage_write[] = {
2720 &subpage_writeb,
2721 &subpage_writew,
2722 &subpage_writel,
2723};
2724
2725static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2726 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2727{
2728 int idx, eidx;
4254fab8 2729 unsigned int i;
db7b5426
BS
2730
2731 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2732 return -1;
2733 idx = SUBPAGE_IDX(start);
2734 eidx = SUBPAGE_IDX(end);
2735#if defined(DEBUG_SUBPAGE)
2736 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2737 mmio, start, end, idx, eidx, memory);
2738#endif
2739 memory >>= IO_MEM_SHIFT;
2740 for (; idx <= eidx; idx++) {
4254fab8 2741 for (i = 0; i < 4; i++) {
3ee89922
BS
2742 if (io_mem_read[memory][i]) {
2743 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2744 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2745 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2746 }
2747 if (io_mem_write[memory][i]) {
2748 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2749 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2750 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2751 }
4254fab8 2752 }
db7b5426
BS
2753 }
2754
2755 return 0;
2756}
2757
00f82b8a 2758static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2759 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2760{
2761 subpage_t *mmio;
2762 int subpage_memory;
2763
2764 mmio = qemu_mallocz(sizeof(subpage_t));
2765 if (mmio != NULL) {
2766 mmio->base = base;
2767 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2768#if defined(DEBUG_SUBPAGE)
2769 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2770 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2771#endif
2772 *phys = subpage_memory | IO_MEM_SUBPAGE;
8da3ff18
PB
2773 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2774 region_offset);
db7b5426
BS
2775 }
2776
2777 return mmio;
2778}
2779
33417e70
FB
2780static void io_mem_init(void)
2781{
3a7d929e 2782 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2783 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2784 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2785 io_mem_nb = 5;
2786
0f459d16 2787 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2788 watch_mem_write, NULL);
1ccde1cb 2789 /* alloc dirty bits array */
0a962c02 2790 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2791 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2792}
2793
2794/* mem_read and mem_write are arrays of functions containing the
2795 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2796 2). Functions can be omitted with a NULL function pointer. The
2797 registered functions may be modified dynamically later.
2798 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2799 modified. If it is zero, a new io zone is allocated. The return
2800 value can be used with cpu_register_physical_memory(). (-1) is
2801 returned on error. */
33417e70
FB
2802int cpu_register_io_memory(int io_index,
2803 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2804 CPUWriteMemoryFunc **mem_write,
2805 void *opaque)
33417e70 2806{
4254fab8 2807 int i, subwidth = 0;
33417e70
FB
2808
2809 if (io_index <= 0) {
b5ff1b31 2810 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2811 return -1;
2812 io_index = io_mem_nb++;
2813 } else {
2814 if (io_index >= IO_MEM_NB_ENTRIES)
2815 return -1;
2816 }
b5ff1b31 2817
33417e70 2818 for(i = 0;i < 3; i++) {
4254fab8
BS
2819 if (!mem_read[i] || !mem_write[i])
2820 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2821 io_mem_read[io_index][i] = mem_read[i];
2822 io_mem_write[io_index][i] = mem_write[i];
2823 }
a4193c8a 2824 io_mem_opaque[io_index] = opaque;
4254fab8 2825 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2826}
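
For illustration, a device model registers its callbacks and then maps the returned descriptor at a guest-physical address. The 'example' device below is hypothetical; as described in the comment above, the same handler may be listed for the byte, word and dword slots.

static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    uint32_t *regs = opaque;            /* four 32-bit registers */
    return regs[(addr >> 2) & 3];
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint32_t val)
{
    uint32_t *regs = opaque;
    regs[(addr >> 2) & 3] = val;
}

static CPUReadMemoryFunc *example_dev_readfn[3] = {
    example_dev_read, example_dev_read, example_dev_read,
};

static CPUWriteMemoryFunc *example_dev_writefn[3] = {
    example_dev_write, example_dev_write, example_dev_write,
};

static void example_dev_init(target_phys_addr_t base, uint32_t *regs)
{
    /* io_index 0 asks for a new io zone; the return value is then used
       as the phys_offset when registering the page. */
    int io = cpu_register_io_memory(0, example_dev_readfn,
                                    example_dev_writefn, regs);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}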
61382a50 2827
8926b517
FB
2828CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2829{
2830 return io_mem_write[io_index >> IO_MEM_SHIFT];
2831}
2832
2833CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2834{
2835 return io_mem_read[io_index >> IO_MEM_SHIFT];
2836}
2837
e2eef170
PB
2838#endif /* !defined(CONFIG_USER_ONLY) */
2839
13eb76e0
FB
2840/* physical memory access (slow version, mainly for debug) */
2841#if defined(CONFIG_USER_ONLY)
5fafdf24 2842void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2843 int len, int is_write)
2844{
2845 int l, flags;
2846 target_ulong page;
53a5960a 2847 void * p;
13eb76e0
FB
2848
2849 while (len > 0) {
2850 page = addr & TARGET_PAGE_MASK;
2851 l = (page + TARGET_PAGE_SIZE) - addr;
2852 if (l > len)
2853 l = len;
2854 flags = page_get_flags(page);
2855 if (!(flags & PAGE_VALID))
2856 return;
2857 if (is_write) {
2858 if (!(flags & PAGE_WRITE))
2859 return;
579a97f7 2860 /* XXX: this code should not depend on lock_user */
72fb7daa 2861 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2862 /* FIXME - should this return an error rather than just fail? */
2863 return;
72fb7daa
AJ
2864 memcpy(p, buf, l);
2865 unlock_user(p, addr, l);
13eb76e0
FB
2866 } else {
2867 if (!(flags & PAGE_READ))
2868 return;
579a97f7 2869 /* XXX: this code should not depend on lock_user */
72fb7daa 2870 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2871 /* FIXME - should this return an error rather than just fail? */
2872 return;
72fb7daa 2873 memcpy(buf, p, l);
5b257578 2874 unlock_user(p, addr, 0);
13eb76e0
FB
2875 }
2876 len -= l;
2877 buf += l;
2878 addr += l;
2879 }
2880}
8df1cd07 2881
13eb76e0 2882#else
5fafdf24 2883void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2884 int len, int is_write)
2885{
2886 int l, io_index;
2887 uint8_t *ptr;
2888 uint32_t val;
2e12669a
FB
2889 target_phys_addr_t page;
2890 unsigned long pd;
92e873b9 2891 PhysPageDesc *p;
3b46e624 2892
13eb76e0
FB
2893 while (len > 0) {
2894 page = addr & TARGET_PAGE_MASK;
2895 l = (page + TARGET_PAGE_SIZE) - addr;
2896 if (l > len)
2897 l = len;
92e873b9 2898 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2899 if (!p) {
2900 pd = IO_MEM_UNASSIGNED;
2901 } else {
2902 pd = p->phys_offset;
2903 }
3b46e624 2904
13eb76e0 2905 if (is_write) {
3a7d929e 2906 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2907 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2908 if (p)
2909 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2910 /* XXX: could force cpu_single_env to NULL to avoid
2911 potential bugs */
13eb76e0 2912 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2913 /* 32 bit write access */
c27004ec 2914 val = ldl_p(buf);
a4193c8a 2915 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2916 l = 4;
2917 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2918 /* 16 bit write access */
c27004ec 2919 val = lduw_p(buf);
a4193c8a 2920 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2921 l = 2;
2922 } else {
1c213d19 2923 /* 8 bit write access */
c27004ec 2924 val = ldub_p(buf);
a4193c8a 2925 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2926 l = 1;
2927 }
2928 } else {
b448f2f3
FB
2929 unsigned long addr1;
2930 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2931 /* RAM case */
b448f2f3 2932 ptr = phys_ram_base + addr1;
13eb76e0 2933 memcpy(ptr, buf, l);
3a7d929e
FB
2934 if (!cpu_physical_memory_is_dirty(addr1)) {
2935 /* invalidate code */
2936 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2937 /* set dirty bit */
5fafdf24 2938 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2939 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2940 }
13eb76e0
FB
2941 }
2942 } else {
5fafdf24 2943 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2944 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2945 /* I/O case */
2946 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2947 if (p)
2948 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
13eb76e0
FB
2949 if (l >= 4 && ((addr & 3) == 0)) {
2950 /* 32 bit read access */
a4193c8a 2951 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2952 stl_p(buf, val);
13eb76e0
FB
2953 l = 4;
2954 } else if (l >= 2 && ((addr & 1) == 0)) {
2955 /* 16 bit read access */
a4193c8a 2956 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2957 stw_p(buf, val);
13eb76e0
FB
2958 l = 2;
2959 } else {
1c213d19 2960 /* 8 bit read access */
a4193c8a 2961 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2962 stb_p(buf, val);
13eb76e0
FB
2963 l = 1;
2964 }
2965 } else {
2966 /* RAM case */
5fafdf24 2967 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2968 (addr & ~TARGET_PAGE_MASK);
2969 memcpy(buf, ptr, l);
2970 }
2971 }
2972 len -= l;
2973 buf += l;
2974 addr += l;
2975 }
2976}
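
A usage sketch of the slow path from a device model's point of view; the DMA helper below is hypothetical. Device code normally goes through the cpu_physical_memory_read()/cpu_physical_memory_write() convenience wrappers, which call cpu_physical_memory_rw() with is_write set to 0 or 1.

static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[256];

    /* Copy through a bounce buffer, one chunk at a time. */
    while (len > 0) {
        int l = len < (int)sizeof(buf) ? len : (int)sizeof(buf);
        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}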
8df1cd07 2977
d0ecd2aa 2978/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2979void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2980 const uint8_t *buf, int len)
2981{
2982 int l;
2983 uint8_t *ptr;
2984 target_phys_addr_t page;
2985 unsigned long pd;
2986 PhysPageDesc *p;
3b46e624 2987
d0ecd2aa
FB
2988 while (len > 0) {
2989 page = addr & TARGET_PAGE_MASK;
2990 l = (page + TARGET_PAGE_SIZE) - addr;
2991 if (l > len)
2992 l = len;
2993 p = phys_page_find(page >> TARGET_PAGE_BITS);
2994 if (!p) {
2995 pd = IO_MEM_UNASSIGNED;
2996 } else {
2997 pd = p->phys_offset;
2998 }
3b46e624 2999
d0ecd2aa 3000 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3001 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3002 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3003 /* do nothing */
3004 } else {
3005 unsigned long addr1;
3006 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3007 /* ROM/RAM case */
3008 ptr = phys_ram_base + addr1;
3009 memcpy(ptr, buf, l);
3010 }
3011 len -= l;
3012 buf += l;
3013 addr += l;
3014 }
3015}
3016
3017
8df1cd07
FB
3018/* warning: addr must be aligned */
3019uint32_t ldl_phys(target_phys_addr_t addr)
3020{
3021 int io_index;
3022 uint8_t *ptr;
3023 uint32_t val;
3024 unsigned long pd;
3025 PhysPageDesc *p;
3026
3027 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3028 if (!p) {
3029 pd = IO_MEM_UNASSIGNED;
3030 } else {
3031 pd = p->phys_offset;
3032 }
3b46e624 3033
5fafdf24 3034 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3035 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3036 /* I/O case */
3037 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3038 if (p)
3039 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3040 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3041 } else {
3042 /* RAM case */
5fafdf24 3043 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3044 (addr & ~TARGET_PAGE_MASK);
3045 val = ldl_p(ptr);
3046 }
3047 return val;
3048}
3049
84b7b8e7
FB
3050/* warning: addr must be aligned */
3051uint64_t ldq_phys(target_phys_addr_t addr)
3052{
3053 int io_index;
3054 uint8_t *ptr;
3055 uint64_t val;
3056 unsigned long pd;
3057 PhysPageDesc *p;
3058
3059 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3060 if (!p) {
3061 pd = IO_MEM_UNASSIGNED;
3062 } else {
3063 pd = p->phys_offset;
3064 }
3b46e624 3065
2a4188a3
FB
3066 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3067 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3068 /* I/O case */
3069 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3070 if (p)
3071 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3072#ifdef TARGET_WORDS_BIGENDIAN
3073 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3074 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3075#else
3076 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3077 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3078#endif
3079 } else {
3080 /* RAM case */
5fafdf24 3081 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3082 (addr & ~TARGET_PAGE_MASK);
3083 val = ldq_p(ptr);
3084 }
3085 return val;
3086}
3087
aab33094
FB
3088/* XXX: optimize */
3089uint32_t ldub_phys(target_phys_addr_t addr)
3090{
3091 uint8_t val;
3092 cpu_physical_memory_read(addr, &val, 1);
3093 return val;
3094}
3095
3096/* XXX: optimize */
3097uint32_t lduw_phys(target_phys_addr_t addr)
3098{
3099 uint16_t val;
3100 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3101 return tswap16(val);
3102}
3103
8df1cd07
FB
3104/* warning: addr must be aligned. The ram page is not masked as dirty
3105 and the code inside is not invalidated. It is useful if the dirty
3106 bits are used to track modified PTEs */
3107void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3108{
3109 int io_index;
3110 uint8_t *ptr;
3111 unsigned long pd;
3112 PhysPageDesc *p;
3113
3114 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3115 if (!p) {
3116 pd = IO_MEM_UNASSIGNED;
3117 } else {
3118 pd = p->phys_offset;
3119 }
3b46e624 3120
3a7d929e 3121 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3122 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3123 if (p)
3124 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3125 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3126 } else {
74576198
AL
3127 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3128 ptr = phys_ram_base + addr1;
8df1cd07 3129 stl_p(ptr, val);
74576198
AL
3130
3131 if (unlikely(in_migration)) {
3132 if (!cpu_physical_memory_is_dirty(addr1)) {
3133 /* invalidate code */
3134 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3135 /* set dirty bit */
3136 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3137 (0xff & ~CODE_DIRTY_FLAG);
3138 }
3139 }
8df1cd07
FB
3140 }
3141}
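
A sketch of the PTE-update pattern the comment above refers to; the helper and the bit position are hypothetical. A target MMU walk can set an accessed/dirty bit in a guest page-table entry without triggering TB invalidation for that page.

#define EXAMPLE_PTE_ACCESSED 0x20   /* illustrative bit position only */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED))
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
}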
3142
bc98a7ef
JM
3143void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3144{
3145 int io_index;
3146 uint8_t *ptr;
3147 unsigned long pd;
3148 PhysPageDesc *p;
3149
3150 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3151 if (!p) {
3152 pd = IO_MEM_UNASSIGNED;
3153 } else {
3154 pd = p->phys_offset;
3155 }
3b46e624 3156
bc98a7ef
JM
3157 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3158 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3159 if (p)
3160 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3161#ifdef TARGET_WORDS_BIGENDIAN
3162 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3163 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3164#else
3165 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3167#endif
3168 } else {
5fafdf24 3169 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3170 (addr & ~TARGET_PAGE_MASK);
3171 stq_p(ptr, val);
3172 }
3173}
3174
8df1cd07 3175/* warning: addr must be aligned */
8df1cd07
FB
3176void stl_phys(target_phys_addr_t addr, uint32_t val)
3177{
3178 int io_index;
3179 uint8_t *ptr;
3180 unsigned long pd;
3181 PhysPageDesc *p;
3182
3183 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3184 if (!p) {
3185 pd = IO_MEM_UNASSIGNED;
3186 } else {
3187 pd = p->phys_offset;
3188 }
3b46e624 3189
3a7d929e 3190 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3191 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3192 if (p)
3193 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3194 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3195 } else {
3196 unsigned long addr1;
3197 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3198 /* RAM case */
3199 ptr = phys_ram_base + addr1;
3200 stl_p(ptr, val);
3a7d929e
FB
3201 if (!cpu_physical_memory_is_dirty(addr1)) {
3202 /* invalidate code */
3203 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3204 /* set dirty bit */
f23db169
FB
3205 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3206 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3207 }
8df1cd07
FB
3208 }
3209}
3210
aab33094
FB
3211/* XXX: optimize */
3212void stb_phys(target_phys_addr_t addr, uint32_t val)
3213{
3214 uint8_t v = val;
3215 cpu_physical_memory_write(addr, &v, 1);
3216}
3217
3218/* XXX: optimize */
3219void stw_phys(target_phys_addr_t addr, uint32_t val)
3220{
3221 uint16_t v = tswap16(val);
3222 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3223}
3224
3225/* XXX: optimize */
3226void stq_phys(target_phys_addr_t addr, uint64_t val)
3227{
3228 val = tswap64(val);
3229 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3230}
3231
13eb76e0
FB
3232#endif
3233
3234/* virtual memory access for debug */
5fafdf24 3235int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3236 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3237{
3238 int l;
9b3c35e0
JM
3239 target_phys_addr_t phys_addr;
3240 target_ulong page;
13eb76e0
FB
3241
3242 while (len > 0) {
3243 page = addr & TARGET_PAGE_MASK;
3244 phys_addr = cpu_get_phys_page_debug(env, page);
3245 /* if no physical page mapped, return an error */
3246 if (phys_addr == -1)
3247 return -1;
3248 l = (page + TARGET_PAGE_SIZE) - addr;
3249 if (l > len)
3250 l = len;
5fafdf24 3251 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3252 buf, l, is_write);
13eb76e0
FB
3253 len -= l;
3254 buf += l;
3255 addr += l;
3256 }
3257 return 0;
3258}
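
A sketch of a typical caller such as a debugger back end; the helper is hypothetical. It fetches four bytes through the CPU's current page tables and decodes them in target byte order.

static uint32_t example_debug_read_u32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    /* cpu_memory_rw_debug() returns -1 if no physical page is mapped. */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return 0;
    return ldl_p(buf);
}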
3259
2e70f6ef
PB
3260/* in deterministic execution mode, instructions doing device I/Os
3261 must be at the end of the TB */
3262void cpu_io_recompile(CPUState *env, void *retaddr)
3263{
3264 TranslationBlock *tb;
3265 uint32_t n, cflags;
3266 target_ulong pc, cs_base;
3267 uint64_t flags;
3268
3269 tb = tb_find_pc((unsigned long)retaddr);
3270 if (!tb) {
3271 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3272 retaddr);
3273 }
3274 n = env->icount_decr.u16.low + tb->icount;
3275 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3276 /* Calculate how many instructions had been executed before the fault
bf20dc07 3277 occurred. */
2e70f6ef
PB
3278 n = n - env->icount_decr.u16.low;
3279 /* Generate a new TB ending on the I/O insn. */
3280 n++;
3281 /* On MIPS and SH, delay slot instructions can only be restarted if
3282 they were already the first instruction in the TB. If this is not
bf20dc07 3283 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3284 branch. */
3285#if defined(TARGET_MIPS)
3286 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3287 env->active_tc.PC -= 4;
3288 env->icount_decr.u16.low++;
3289 env->hflags &= ~MIPS_HFLAG_BMASK;
3290 }
3291#elif defined(TARGET_SH4)
3292 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3293 && n > 1) {
3294 env->pc -= 2;
3295 env->icount_decr.u16.low++;
3296 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3297 }
3298#endif
3299 /* This should never happen. */
3300 if (n > CF_COUNT_MASK)
3301 cpu_abort(env, "TB too big during recompile");
3302
3303 cflags = n | CF_LAST_IO;
3304 pc = tb->pc;
3305 cs_base = tb->cs_base;
3306 flags = tb->flags;
3307 tb_phys_invalidate(tb, -1);
3308 /* FIXME: In theory this could raise an exception. In practice
3309 we have already translated the block once so it's probably ok. */
3310 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3311 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3312 the first in the TB) then we end up generating a whole new TB and
3313 repeating the fault, which is horribly inefficient.
3314 Better would be to execute just this insn uncached, or generate a
3315 second new TB. */
3316 cpu_resume_from_signal(env, NULL);
3317}
3318
e3db7226
FB
3319void dump_exec_info(FILE *f,
3320 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3321{
3322 int i, target_code_size, max_target_code_size;
3323 int direct_jmp_count, direct_jmp2_count, cross_page;
3324 TranslationBlock *tb;
3b46e624 3325
e3db7226
FB
3326 target_code_size = 0;
3327 max_target_code_size = 0;
3328 cross_page = 0;
3329 direct_jmp_count = 0;
3330 direct_jmp2_count = 0;
3331 for(i = 0; i < nb_tbs; i++) {
3332 tb = &tbs[i];
3333 target_code_size += tb->size;
3334 if (tb->size > max_target_code_size)
3335 max_target_code_size = tb->size;
3336 if (tb->page_addr[1] != -1)
3337 cross_page++;
3338 if (tb->tb_next_offset[0] != 0xffff) {
3339 direct_jmp_count++;
3340 if (tb->tb_next_offset[1] != 0xffff) {
3341 direct_jmp2_count++;
3342 }
3343 }
3344 }
3345 /* XXX: avoid using doubles ? */
57fec1fe 3346 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3347 cpu_fprintf(f, "gen code size %ld/%ld\n",
3348 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3349 cpu_fprintf(f, "TB count %d/%d\n",
3350 nb_tbs, code_gen_max_blocks);
5fafdf24 3351 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3352 nb_tbs ? target_code_size / nb_tbs : 0,
3353 max_target_code_size);
5fafdf24 3354 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3355 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3356 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3357 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3358 cross_page,
e3db7226
FB
3359 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3360 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3361 direct_jmp_count,
e3db7226
FB
3362 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3363 direct_jmp2_count,
3364 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3365 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3366 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3367 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3368 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3369 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3370}
3371
5fafdf24 3372#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3373
3374#define MMUSUFFIX _cmmu
3375#define GETPC() NULL
3376#define env cpu_single_env
b769d8fe 3377#define SOFTMMU_CODE_ACCESS
61382a50
FB
3378
3379#define SHIFT 0
3380#include "softmmu_template.h"
3381
3382#define SHIFT 1
3383#include "softmmu_template.h"
3384
3385#define SHIFT 2
3386#include "softmmu_template.h"
3387
3388#define SHIFT 3
3389#include "softmmu_template.h"
3390
3391#undef env
3392
3393#endif