[mirror_qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
51
52/* make various TB consistency checks */
5fafdf24
TS
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
99773bd4
PB
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
9fa3e853
FB
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
fd6ce8f6 68
108c49b8
FB
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
eb51d102
FB
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
141ac468
BS
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
97 section close to the code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
9fa3e853
FB
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
6a00d601
FB
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
2e70f6ef
PB
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
8da3ff18 149 ram_addr_t region_offset;
92e873b9
FB
150} PhysPageDesc;
151
54936004 152#define L2_BITS 10
bedb69ea
JM
153#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154/* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64-bit address space.
157 */
158#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159#else
03875444 160#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 161#endif
54936004
FB
162
163#define L1_SIZE (1 << L1_BITS)
164#define L2_SIZE (1 << L2_BITS)
165
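/* Illustration (added comment, not part of the original source): a target
   page index is split into an L1 and an L2 component for the two-level
   l1_map lookup, roughly as page_find() does below:

       target_ulong index = addr >> TARGET_PAGE_BITS;
       PageDesc **lp = &l1_map[index >> L2_BITS];        (L1 slot)
       PageDesc *p   = *lp + (index & (L2_SIZE - 1));    (L2 entry)

   Assuming L2_BITS == 10 and 4 KB target pages, this covers a 32-bit
   address space with at most 1024 * 1024 PageDesc entries. */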
83fb7adf
FB
166unsigned long qemu_real_host_page_size;
167unsigned long qemu_host_page_bits;
168unsigned long qemu_host_page_size;
169unsigned long qemu_host_page_mask;
54936004 170
92e873b9 171/* XXX: for system emulation, it could just be an array */
54936004 172static PageDesc *l1_map[L1_SIZE];
bdaf78e0 173static PhysPageDesc **l1_phys_map;
54936004 174
e2eef170
PB
175#if !defined(CONFIG_USER_ONLY)
176static void io_mem_init(void);
177
33417e70 178/* io memory support */
33417e70
FB
179CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 181void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70 182static int io_mem_nb;
6658ffb8
PB
183static int io_mem_watch;
184#endif
33417e70 185
34865134 186/* log support */
d9b630fd 187static const char *logfilename = "/tmp/qemu.log";
34865134
FB
188FILE *logfile;
189int loglevel;
e735b91c 190static int log_append = 0;
34865134 191
e3db7226
FB
192/* statistics */
193static int tlb_flush_count;
194static int tb_flush_count;
195static int tb_phys_invalidate_count;
196
db7b5426
BS
197#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198typedef struct subpage_t {
199 target_phys_addr_t base;
3ee89922
BS
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
8da3ff18 203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
204} subpage_t;
205
7cb69cae
FB
206#ifdef _WIN32
207static void map_exec(void *addr, long size)
208{
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
212
213}
214#else
215static void map_exec(void *addr, long size)
216{
4369415f 217 unsigned long start, end, page_size;
7cb69cae 218
4369415f 219 page_size = getpagesize();
7cb69cae 220 start = (unsigned long)addr;
4369415f 221 start &= ~(page_size - 1);
7cb69cae
FB
222
223 end = (unsigned long)addr + size;
4369415f
FB
224 end += page_size - 1;
225 end &= ~(page_size - 1);
7cb69cae
FB
226
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
229}
230#endif
231
b346ff46 232static void page_init(void)
54936004 233{
83fb7adf 234 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 235 TARGET_PAGE_SIZE */
c2b48b69
AL
236#ifdef _WIN32
237 {
238 SYSTEM_INFO system_info;
239
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
242 }
243#else
244 qemu_real_host_page_size = getpagesize();
245#endif
83fb7adf
FB
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
256
257#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258 {
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
262
c8a706fe 263 mmap_lock();
0776590d 264 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
e0b8d65a
BS
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 274 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
277 }
278 } while (!feof(f));
279 fclose(f);
280 }
c8a706fe 281 mmap_unlock();
50a9569b
AZ
282 }
283#endif
54936004
FB
284}
285
434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
17e2377a
PB
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
292 return NULL;
293#endif
434929bf
AL
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
54936004
FB
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a
PB
307#if defined(CONFIG_USER_ONLY)
308 unsigned long addr;
309 size_t len = sizeof(PageDesc) * L2_SIZE;
310 /* Don't use qemu_malloc because it may recurse. */
311 p = mmap(0, len, PROT_READ | PROT_WRITE,
312 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 313 *lp = p;
17e2377a
PB
314 addr = h2g(p);
315 if (addr == (target_ulong)addr) {
316 page_set_flags(addr & TARGET_PAGE_MASK,
317 TARGET_PAGE_ALIGN(addr + len),
318 PAGE_RESERVED);
319 }
320#else
321 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
322 *lp = p;
323#endif
54936004
FB
324 }
325 return p + (index & (L2_SIZE - 1));
326}
327
00f82b8a 328static inline PageDesc *page_find(target_ulong index)
54936004 329{
434929bf
AL
330 PageDesc **lp, *p;
331 lp = page_l1_map(index);
332 if (!lp)
333 return NULL;
54936004 334
434929bf 335 p = *lp;
54936004
FB
336 if (!p)
337 return 0;
fd6ce8f6
FB
338 return p + (index & (L2_SIZE - 1));
339}
340
108c49b8 341static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 342{
108c49b8 343 void **lp, **p;
e3f4e2a4 344 PhysPageDesc *pd;
92e873b9 345
108c49b8
FB
346 p = (void **)l1_phys_map;
347#if TARGET_PHYS_ADDR_SPACE_BITS > 32
348
349#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
350#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
351#endif
352 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
353 p = *lp;
354 if (!p) {
355 /* allocate if not found */
108c49b8
FB
356 if (!alloc)
357 return NULL;
358 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
359 memset(p, 0, sizeof(void *) * L1_SIZE);
360 *lp = p;
361 }
362#endif
363 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
364 pd = *lp;
365 if (!pd) {
366 int i;
108c49b8
FB
367 /* allocate if not found */
368 if (!alloc)
369 return NULL;
e3f4e2a4
PB
370 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
371 *lp = pd;
372 for (i = 0; i < L2_SIZE; i++)
373 pd[i].phys_offset = IO_MEM_UNASSIGNED;
92e873b9 374 }
e3f4e2a4 375 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
376}
377
108c49b8 378static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 379{
108c49b8 380 return phys_page_find_alloc(index, 0);
92e873b9
FB
381}
382
9fa3e853 383#if !defined(CONFIG_USER_ONLY)
6a00d601 384static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 385static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 386 target_ulong vaddr);
c8a706fe
PB
387#define mmap_lock() do { } while(0)
388#define mmap_unlock() do { } while(0)
9fa3e853 389#endif
fd6ce8f6 390
4369415f
FB
391#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
392
393#if defined(CONFIG_USER_ONLY)
394/* Currently it is not recommended to allocate big chunks of data in
395 user mode. This will change once a dedicated libc is used */
396#define USE_STATIC_CODE_GEN_BUFFER
397#endif
398
399#ifdef USE_STATIC_CODE_GEN_BUFFER
400static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
401#endif
402
8fcd3692 403static void code_gen_alloc(unsigned long tb_size)
26a5f13b 404{
4369415f
FB
405#ifdef USE_STATIC_CODE_GEN_BUFFER
406 code_gen_buffer = static_code_gen_buffer;
407 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
408 map_exec(code_gen_buffer, code_gen_buffer_size);
409#else
26a5f13b
FB
410 code_gen_buffer_size = tb_size;
411 if (code_gen_buffer_size == 0) {
4369415f
FB
412#if defined(CONFIG_USER_ONLY)
413 /* in user mode, phys_ram_size is not meaningful */
414 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415#else
26a5f13b 416 /* XXX: needs ajustments */
174a9a1f 417 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 418#endif
26a5f13b
FB
419 }
420 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
421 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
422 /* The code gen buffer location may have constraints depending on
423 the host cpu and OS */
424#if defined(__linux__)
425 {
426 int flags;
141ac468
BS
427 void *start = NULL;
428
26a5f13b
FB
429 flags = MAP_PRIVATE | MAP_ANONYMOUS;
430#if defined(__x86_64__)
431 flags |= MAP_32BIT;
432 /* Cannot map more than that */
433 if (code_gen_buffer_size > (800 * 1024 * 1024))
434 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
435#elif defined(__sparc_v9__)
436 // Map the buffer below 2G, so we can use direct calls and branches
437 flags |= MAP_FIXED;
438 start = (void *) 0x60000000UL;
439 if (code_gen_buffer_size > (512 * 1024 * 1024))
440 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 441#elif defined(__arm__)
63d41246 442 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
443 flags |= MAP_FIXED;
444 start = (void *) 0x01000000UL;
445 if (code_gen_buffer_size > 16 * 1024 * 1024)
446 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 447#endif
141ac468
BS
448 code_gen_buffer = mmap(start, code_gen_buffer_size,
449 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
450 flags, -1, 0);
451 if (code_gen_buffer == MAP_FAILED) {
452 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
453 exit(1);
454 }
455 }
06e67a82
AL
456#elif defined(__FreeBSD__)
457 {
458 int flags;
459 void *addr = NULL;
460 flags = MAP_PRIVATE | MAP_ANONYMOUS;
461#if defined(__x86_64__)
462 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
463 * 0x40000000 is free */
464 flags |= MAP_FIXED;
465 addr = (void *)0x40000000;
466 /* Cannot map more than that */
467 if (code_gen_buffer_size > (800 * 1024 * 1024))
468 code_gen_buffer_size = (800 * 1024 * 1024);
469#endif
470 code_gen_buffer = mmap(addr, code_gen_buffer_size,
471 PROT_WRITE | PROT_READ | PROT_EXEC,
472 flags, -1, 0);
473 if (code_gen_buffer == MAP_FAILED) {
474 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
475 exit(1);
476 }
477 }
26a5f13b
FB
478#else
479 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
480 if (!code_gen_buffer) {
481 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482 exit(1);
483 }
484 map_exec(code_gen_buffer, code_gen_buffer_size);
485#endif
4369415f 486#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
487 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
488 code_gen_buffer_max_size = code_gen_buffer_size -
489 code_gen_max_block_size();
490 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
491 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
492}
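/* Note added for clarity: code_gen_buffer_max_size keeps one maximal
   block's worth of slack at the end of the buffer, so a translation that
   starts just below the threshold can still fit, while code_gen_max_blocks
   sizes the tbs[] array from the average expected block size. */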
493
494/* Must be called before using the QEMU cpus. 'tb_size' is the size
495 (in bytes) allocated to the translation buffer. Zero means default
496 size. */
497void cpu_exec_init_all(unsigned long tb_size)
498{
26a5f13b
FB
499 cpu_gen_init();
500 code_gen_alloc(tb_size);
501 code_gen_ptr = code_gen_buffer;
4369415f 502 page_init();
e2eef170 503#if !defined(CONFIG_USER_ONLY)
26a5f13b 504 io_mem_init();
e2eef170 505#endif
26a5f13b
FB
506}
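/* Usage sketch (illustrative, not taken from this file): a front end calls
   this once before creating any CPU, e.g.

       cpu_exec_init_all(0);            (0 selects the default buffer size)
       env = cpu_init(cpu_model);       (target hook, reaches cpu_exec_init())

   where cpu_model is the target-specific CPU model string. */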
507
9656f324
PB
508#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
509
510#define CPU_COMMON_SAVE_VERSION 1
511
512static void cpu_common_save(QEMUFile *f, void *opaque)
513{
514 CPUState *env = opaque;
515
516 qemu_put_be32s(f, &env->halted);
517 qemu_put_be32s(f, &env->interrupt_request);
518}
519
520static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
521{
522 CPUState *env = opaque;
523
524 if (version_id != CPU_COMMON_SAVE_VERSION)
525 return -EINVAL;
526
527 qemu_get_be32s(f, &env->halted);
75f482ae 528 qemu_get_be32s(f, &env->interrupt_request);
9656f324
PB
529 tlb_flush(env, 1);
530
531 return 0;
532}
533#endif
534
6a00d601 535void cpu_exec_init(CPUState *env)
fd6ce8f6 536{
6a00d601
FB
537 CPUState **penv;
538 int cpu_index;
539
6a00d601
FB
540 env->next_cpu = NULL;
541 penv = &first_cpu;
542 cpu_index = 0;
543 while (*penv != NULL) {
544 penv = (CPUState **)&(*penv)->next_cpu;
545 cpu_index++;
546 }
547 env->cpu_index = cpu_index;
c0ce998e
AL
548 TAILQ_INIT(&env->breakpoints);
549 TAILQ_INIT(&env->watchpoints);
6a00d601 550 *penv = env;
b3c7724c 551#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
552 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
553 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
554 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
555 cpu_save, cpu_load, env);
556#endif
fd6ce8f6
FB
557}
558
9fa3e853
FB
559static inline void invalidate_page_bitmap(PageDesc *p)
560{
561 if (p->code_bitmap) {
59817ccb 562 qemu_free(p->code_bitmap);
9fa3e853
FB
563 p->code_bitmap = NULL;
564 }
565 p->code_write_count = 0;
566}
567
fd6ce8f6
FB
568/* set to NULL all the 'first_tb' fields in all PageDescs */
569static void page_flush_tb(void)
570{
571 int i, j;
572 PageDesc *p;
573
574 for(i = 0; i < L1_SIZE; i++) {
575 p = l1_map[i];
576 if (p) {
9fa3e853
FB
577 for(j = 0; j < L2_SIZE; j++) {
578 p->first_tb = NULL;
579 invalidate_page_bitmap(p);
580 p++;
581 }
fd6ce8f6
FB
582 }
583 }
584}
585
586/* flush all the translation blocks */
d4e8164f 587/* XXX: tb_flush is currently not thread safe */
6a00d601 588void tb_flush(CPUState *env1)
fd6ce8f6 589{
6a00d601 590 CPUState *env;
0124311e 591#if defined(DEBUG_FLUSH)
ab3d1727
BS
592 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
593 (unsigned long)(code_gen_ptr - code_gen_buffer),
594 nb_tbs, nb_tbs > 0 ?
595 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 596#endif
26a5f13b 597 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
598 cpu_abort(env1, "Internal error: code buffer overflow\n");
599
fd6ce8f6 600 nb_tbs = 0;
3b46e624 601
6a00d601
FB
602 for(env = first_cpu; env != NULL; env = env->next_cpu) {
603 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
604 }
9fa3e853 605
8a8a608f 606 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 607 page_flush_tb();
9fa3e853 608
fd6ce8f6 609 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
610 /* XXX: flush processor icache at this point if cache flush is
611 expensive */
e3db7226 612 tb_flush_count++;
fd6ce8f6
FB
613}
614
615#ifdef DEBUG_TB_CHECK
616
bc98a7ef 617static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
618{
619 TranslationBlock *tb;
620 int i;
621 address &= TARGET_PAGE_MASK;
99773bd4
PB
622 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
623 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
624 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
625 address >= tb->pc + tb->size)) {
626 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 627 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
628 }
629 }
630 }
631}
632
633/* verify that all the pages have correct rights for code */
634static void tb_page_check(void)
635{
636 TranslationBlock *tb;
637 int i, flags1, flags2;
3b46e624 638
99773bd4
PB
639 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
640 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
641 flags1 = page_get_flags(tb->pc);
642 flags2 = page_get_flags(tb->pc + tb->size - 1);
643 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
644 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 645 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
646 }
647 }
648 }
649}
650
bdaf78e0 651static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
652{
653 TranslationBlock *tb1;
654 unsigned int n1;
655
656 /* suppress any remaining jumps to this TB */
657 tb1 = tb->jmp_first;
658 for(;;) {
659 n1 = (long)tb1 & 3;
660 tb1 = (TranslationBlock *)((long)tb1 & ~3);
661 if (n1 == 2)
662 break;
663 tb1 = tb1->jmp_next[n1];
664 }
665 /* check end of list */
666 if (tb1 != tb) {
667 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
668 }
669}
670
fd6ce8f6
FB
671#endif
672
673/* invalidate one TB */
674static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
675 int next_offset)
676{
677 TranslationBlock *tb1;
678 for(;;) {
679 tb1 = *ptb;
680 if (tb1 == tb) {
681 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
682 break;
683 }
684 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
685 }
686}
687
9fa3e853
FB
688static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
689{
690 TranslationBlock *tb1;
691 unsigned int n1;
692
693 for(;;) {
694 tb1 = *ptb;
695 n1 = (long)tb1 & 3;
696 tb1 = (TranslationBlock *)((long)tb1 & ~3);
697 if (tb1 == tb) {
698 *ptb = tb1->page_next[n1];
699 break;
700 }
701 ptb = &tb1->page_next[n1];
702 }
703}
704
d4e8164f
FB
705static inline void tb_jmp_remove(TranslationBlock *tb, int n)
706{
707 TranslationBlock *tb1, **ptb;
708 unsigned int n1;
709
710 ptb = &tb->jmp_next[n];
711 tb1 = *ptb;
712 if (tb1) {
713 /* find tb(n) in circular list */
714 for(;;) {
715 tb1 = *ptb;
716 n1 = (long)tb1 & 3;
717 tb1 = (TranslationBlock *)((long)tb1 & ~3);
718 if (n1 == n && tb1 == tb)
719 break;
720 if (n1 == 2) {
721 ptb = &tb1->jmp_first;
722 } else {
723 ptb = &tb1->jmp_next[n1];
724 }
725 }
726 /* now we can suppress tb(n) from the list */
727 *ptb = tb->jmp_next[n];
728
729 tb->jmp_next[n] = NULL;
730 }
731}
732
733/* reset the jump entry 'n' of a TB so that it is not chained to
734 another TB */
735static inline void tb_reset_jump(TranslationBlock *tb, int n)
736{
737 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
738}
739
2e70f6ef 740void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 741{
6a00d601 742 CPUState *env;
8a40a180 743 PageDesc *p;
d4e8164f 744 unsigned int h, n1;
00f82b8a 745 target_phys_addr_t phys_pc;
8a40a180 746 TranslationBlock *tb1, *tb2;
3b46e624 747
8a40a180
FB
748 /* remove the TB from the hash list */
749 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
750 h = tb_phys_hash_func(phys_pc);
5fafdf24 751 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
752 offsetof(TranslationBlock, phys_hash_next));
753
754 /* remove the TB from the page list */
755 if (tb->page_addr[0] != page_addr) {
756 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
757 tb_page_remove(&p->first_tb, tb);
758 invalidate_page_bitmap(p);
759 }
760 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
761 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
762 tb_page_remove(&p->first_tb, tb);
763 invalidate_page_bitmap(p);
764 }
765
36bdbe54 766 tb_invalidated_flag = 1;
59817ccb 767
fd6ce8f6 768 /* remove the TB from the hash list */
8a40a180 769 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
770 for(env = first_cpu; env != NULL; env = env->next_cpu) {
771 if (env->tb_jmp_cache[h] == tb)
772 env->tb_jmp_cache[h] = NULL;
773 }
d4e8164f
FB
774
775 /* suppress this TB from the two jump lists */
776 tb_jmp_remove(tb, 0);
777 tb_jmp_remove(tb, 1);
778
779 /* suppress any remaining jumps to this TB */
780 tb1 = tb->jmp_first;
781 for(;;) {
782 n1 = (long)tb1 & 3;
783 if (n1 == 2)
784 break;
785 tb1 = (TranslationBlock *)((long)tb1 & ~3);
786 tb2 = tb1->jmp_next[n1];
787 tb_reset_jump(tb1, n1);
788 tb1->jmp_next[n1] = NULL;
789 tb1 = tb2;
790 }
791 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 792
e3db7226 793 tb_phys_invalidate_count++;
9fa3e853
FB
794}
795
796static inline void set_bits(uint8_t *tab, int start, int len)
797{
798 int end, mask, end1;
799
800 end = start + len;
801 tab += start >> 3;
802 mask = 0xff << (start & 7);
803 if ((start & ~7) == (end & ~7)) {
804 if (start < end) {
805 mask &= ~(0xff << (end & 7));
806 *tab |= mask;
807 }
808 } else {
809 *tab++ |= mask;
810 start = (start + 8) & ~7;
811 end1 = end & ~7;
812 while (start < end1) {
813 *tab++ = 0xff;
814 start += 8;
815 }
816 if (start < end) {
817 mask = ~(0xff << (end & 7));
818 *tab |= mask;
819 }
820 }
821}
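/* Worked example (added for illustration): set_bits(tab, 3, 7) marks bits
   3..9, i.e. tab[0] |= 0xf8 and tab[1] |= 0x03; build_page_bitmap() below
   uses this to flag, one bit per byte, the parts of a page covered by TBs. */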
822
823static void build_page_bitmap(PageDesc *p)
824{
825 int n, tb_start, tb_end;
826 TranslationBlock *tb;
3b46e624 827
b2a7081a 828 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
829 if (!p->code_bitmap)
830 return;
9fa3e853
FB
831
832 tb = p->first_tb;
833 while (tb != NULL) {
834 n = (long)tb & 3;
835 tb = (TranslationBlock *)((long)tb & ~3);
836 /* NOTE: this is subtle as a TB may span two physical pages */
837 if (n == 0) {
838 /* NOTE: tb_end may be after the end of the page, but
839 it is not a problem */
840 tb_start = tb->pc & ~TARGET_PAGE_MASK;
841 tb_end = tb_start + tb->size;
842 if (tb_end > TARGET_PAGE_SIZE)
843 tb_end = TARGET_PAGE_SIZE;
844 } else {
845 tb_start = 0;
846 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
847 }
848 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849 tb = tb->page_next[n];
850 }
851}
852
2e70f6ef
PB
853TranslationBlock *tb_gen_code(CPUState *env,
854 target_ulong pc, target_ulong cs_base,
855 int flags, int cflags)
d720b93d
FB
856{
857 TranslationBlock *tb;
858 uint8_t *tc_ptr;
859 target_ulong phys_pc, phys_page2, virt_page2;
860 int code_gen_size;
861
c27004ec
FB
862 phys_pc = get_phys_addr_code(env, pc);
863 tb = tb_alloc(pc);
d720b93d
FB
864 if (!tb) {
865 /* flush must be done */
866 tb_flush(env);
867 /* cannot fail at this point */
c27004ec 868 tb = tb_alloc(pc);
2e70f6ef
PB
869 /* Don't forget to invalidate previous TB info. */
870 tb_invalidated_flag = 1;
d720b93d
FB
871 }
872 tc_ptr = code_gen_ptr;
873 tb->tc_ptr = tc_ptr;
874 tb->cs_base = cs_base;
875 tb->flags = flags;
876 tb->cflags = cflags;
d07bde88 877 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 878 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 879
d720b93d 880 /* check next page if needed */
c27004ec 881 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 882 phys_page2 = -1;
c27004ec 883 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
884 phys_page2 = get_phys_addr_code(env, virt_page2);
885 }
886 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 887 return tb;
d720b93d 888}
3b46e624 889
9fa3e853
FB
890/* invalidate all TBs which intersect with the target physical page
891 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
892 the same physical page. 'is_cpu_write_access' should be true if called
893 from a real cpu write access: the virtual CPU will exit the current
894 TB if code is modified inside this TB. */
00f82b8a 895void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
896 int is_cpu_write_access)
897{
6b917547 898 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 899 CPUState *env = cpu_single_env;
9fa3e853 900 target_ulong tb_start, tb_end;
6b917547
AL
901 PageDesc *p;
902 int n;
903#ifdef TARGET_HAS_PRECISE_SMC
904 int current_tb_not_found = is_cpu_write_access;
905 TranslationBlock *current_tb = NULL;
906 int current_tb_modified = 0;
907 target_ulong current_pc = 0;
908 target_ulong current_cs_base = 0;
909 int current_flags = 0;
910#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
911
912 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 913 if (!p)
9fa3e853 914 return;
5fafdf24 915 if (!p->code_bitmap &&
d720b93d
FB
916 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917 is_cpu_write_access) {
9fa3e853
FB
918 /* build code bitmap */
919 build_page_bitmap(p);
920 }
921
922 /* we remove all the TBs in the range [start, end[ */
923 /* XXX: see if in some cases it could be faster to invalidate all the code */
924 tb = p->first_tb;
925 while (tb != NULL) {
926 n = (long)tb & 3;
927 tb = (TranslationBlock *)((long)tb & ~3);
928 tb_next = tb->page_next[n];
929 /* NOTE: this is subtle as a TB may span two physical pages */
930 if (n == 0) {
931 /* NOTE: tb_end may be after the end of the page, but
932 it is not a problem */
933 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934 tb_end = tb_start + tb->size;
935 } else {
936 tb_start = tb->page_addr[1];
937 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938 }
939 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
940#ifdef TARGET_HAS_PRECISE_SMC
941 if (current_tb_not_found) {
942 current_tb_not_found = 0;
943 current_tb = NULL;
2e70f6ef 944 if (env->mem_io_pc) {
d720b93d 945 /* now we have a real cpu fault */
2e70f6ef 946 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
947 }
948 }
949 if (current_tb == tb &&
2e70f6ef 950 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
951 /* If we are modifying the current TB, we must stop
952 its execution. We could be more precise by checking
953 that the modification is after the current PC, but it
954 would require a specialized function to partially
955 restore the CPU state */
3b46e624 956
d720b93d 957 current_tb_modified = 1;
5fafdf24 958 cpu_restore_state(current_tb, env,
2e70f6ef 959 env->mem_io_pc, NULL);
6b917547
AL
960 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961 &current_flags);
d720b93d
FB
962 }
963#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
964 /* we need to do that to handle the case where a signal
965 occurs while doing tb_phys_invalidate() */
966 saved_tb = NULL;
967 if (env) {
968 saved_tb = env->current_tb;
969 env->current_tb = NULL;
970 }
9fa3e853 971 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
972 if (env) {
973 env->current_tb = saved_tb;
974 if (env->interrupt_request && env->current_tb)
975 cpu_interrupt(env, env->interrupt_request);
976 }
9fa3e853
FB
977 }
978 tb = tb_next;
979 }
980#if !defined(CONFIG_USER_ONLY)
981 /* if no code remains, there is no need to keep using slow writes */
982 if (!p->first_tb) {
983 invalidate_page_bitmap(p);
d720b93d 984 if (is_cpu_write_access) {
2e70f6ef 985 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
986 }
987 }
988#endif
989#ifdef TARGET_HAS_PRECISE_SMC
990 if (current_tb_modified) {
991 /* we generate a block containing just the instruction
992 modifying the memory. It will ensure that it cannot modify
993 itself */
ea1c1802 994 env->current_tb = NULL;
2e70f6ef 995 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 996 cpu_resume_from_signal(env, NULL);
9fa3e853 997 }
fd6ce8f6 998#endif
9fa3e853 999}
fd6ce8f6 1000
9fa3e853 1001/* len must be <= 8 and start must be a multiple of len */
00f82b8a 1002static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
1003{
1004 PageDesc *p;
1005 int offset, b;
59817ccb 1006#if 0
a4193c8a
FB
1007 if (1) {
1008 if (loglevel) {
5fafdf24 1009 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
2e70f6ef 1010 cpu_single_env->mem_io_vaddr, len,
5fafdf24 1011 cpu_single_env->eip,
a4193c8a
FB
1012 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1013 }
59817ccb
FB
1014 }
1015#endif
9fa3e853 1016 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1017 if (!p)
9fa3e853
FB
1018 return;
1019 if (p->code_bitmap) {
1020 offset = start & ~TARGET_PAGE_MASK;
1021 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1022 if (b & ((1 << len) - 1))
1023 goto do_invalidate;
1024 } else {
1025 do_invalidate:
d720b93d 1026 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1027 }
1028}
1029
9fa3e853 1030#if !defined(CONFIG_SOFTMMU)
00f82b8a 1031static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1032 unsigned long pc, void *puc)
9fa3e853 1033{
6b917547 1034 TranslationBlock *tb;
9fa3e853 1035 PageDesc *p;
6b917547 1036 int n;
d720b93d 1037#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1038 TranslationBlock *current_tb = NULL;
d720b93d 1039 CPUState *env = cpu_single_env;
6b917547
AL
1040 int current_tb_modified = 0;
1041 target_ulong current_pc = 0;
1042 target_ulong current_cs_base = 0;
1043 int current_flags = 0;
d720b93d 1044#endif
9fa3e853
FB
1045
1046 addr &= TARGET_PAGE_MASK;
1047 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1048 if (!p)
9fa3e853
FB
1049 return;
1050 tb = p->first_tb;
d720b93d
FB
1051#ifdef TARGET_HAS_PRECISE_SMC
1052 if (tb && pc != 0) {
1053 current_tb = tb_find_pc(pc);
1054 }
1055#endif
9fa3e853
FB
1056 while (tb != NULL) {
1057 n = (long)tb & 3;
1058 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1059#ifdef TARGET_HAS_PRECISE_SMC
1060 if (current_tb == tb &&
2e70f6ef 1061 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1062 /* If we are modifying the current TB, we must stop
1063 its execution. We could be more precise by checking
1064 that the modification is after the current PC, but it
1065 would require a specialized function to partially
1066 restore the CPU state */
3b46e624 1067
d720b93d
FB
1068 current_tb_modified = 1;
1069 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1070 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1071 &current_flags);
d720b93d
FB
1072 }
1073#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1074 tb_phys_invalidate(tb, addr);
1075 tb = tb->page_next[n];
1076 }
fd6ce8f6 1077 p->first_tb = NULL;
d720b93d
FB
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_modified) {
1080 /* we generate a block containing just the instruction
1081 modifying the memory. It will ensure that it cannot modify
1082 itself */
ea1c1802 1083 env->current_tb = NULL;
2e70f6ef 1084 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1085 cpu_resume_from_signal(env, puc);
1086 }
1087#endif
fd6ce8f6 1088}
9fa3e853 1089#endif
fd6ce8f6
FB
1090
1091/* add the tb in the target page and protect it if necessary */
5fafdf24 1092static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1093 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1094{
1095 PageDesc *p;
9fa3e853
FB
1096 TranslationBlock *last_first_tb;
1097
1098 tb->page_addr[n] = page_addr;
3a7d929e 1099 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1100 tb->page_next[n] = p->first_tb;
1101 last_first_tb = p->first_tb;
1102 p->first_tb = (TranslationBlock *)((long)tb | n);
1103 invalidate_page_bitmap(p);
fd6ce8f6 1104
107db443 1105#if defined(TARGET_HAS_SMC) || 1
d720b93d 1106
9fa3e853 1107#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1108 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1109 target_ulong addr;
1110 PageDesc *p2;
9fa3e853
FB
1111 int prot;
1112
fd6ce8f6
FB
1113 /* force the host page as non writable (writes will have a
1114 page fault + mprotect overhead) */
53a5960a 1115 page_addr &= qemu_host_page_mask;
fd6ce8f6 1116 prot = 0;
53a5960a
PB
1117 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1118 addr += TARGET_PAGE_SIZE) {
1119
1120 p2 = page_find (addr >> TARGET_PAGE_BITS);
1121 if (!p2)
1122 continue;
1123 prot |= p2->flags;
1124 p2->flags &= ~PAGE_WRITE;
1125 page_get_flags(addr);
1126 }
5fafdf24 1127 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1128 (prot & PAGE_BITS) & ~PAGE_WRITE);
1129#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1130 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1131 page_addr);
fd6ce8f6 1132#endif
fd6ce8f6 1133 }
9fa3e853
FB
1134#else
1135 /* if some code is already present, then the pages are already
1136 protected. So we handle the case where only the first TB is
1137 allocated in a physical page */
1138 if (!last_first_tb) {
6a00d601 1139 tlb_protect_code(page_addr);
9fa3e853
FB
1140 }
1141#endif
d720b93d
FB
1142
1143#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1144}
1145
1146/* Allocate a new translation block. Flush the translation buffer if
1147 too many translation blocks or too much generated code. */
c27004ec 1148TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1149{
1150 TranslationBlock *tb;
fd6ce8f6 1151
26a5f13b
FB
1152 if (nb_tbs >= code_gen_max_blocks ||
1153 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1154 return NULL;
fd6ce8f6
FB
1155 tb = &tbs[nb_tbs++];
1156 tb->pc = pc;
b448f2f3 1157 tb->cflags = 0;
d4e8164f
FB
1158 return tb;
1159}
1160
2e70f6ef
PB
1161void tb_free(TranslationBlock *tb)
1162{
bf20dc07 1163 /* In practice this is mostly used for single-use temporary TBs.
2e70f6ef
PB
1164 Ignore the hard cases and just back up if this TB happens to
1165 be the last one generated. */
1166 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1167 code_gen_ptr = tb->tc_ptr;
1168 nb_tbs--;
1169 }
1170}
1171
9fa3e853
FB
1172/* add a new TB and link it to the physical page tables. phys_page2 is
1173 (-1) to indicate that only one page contains the TB. */
5fafdf24 1174void tb_link_phys(TranslationBlock *tb,
9fa3e853 1175 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1176{
9fa3e853
FB
1177 unsigned int h;
1178 TranslationBlock **ptb;
1179
c8a706fe
PB
1180 /* Grab the mmap lock to stop another thread invalidating this TB
1181 before we are done. */
1182 mmap_lock();
9fa3e853
FB
1183 /* add in the physical hash table */
1184 h = tb_phys_hash_func(phys_pc);
1185 ptb = &tb_phys_hash[h];
1186 tb->phys_hash_next = *ptb;
1187 *ptb = tb;
fd6ce8f6
FB
1188
1189 /* add in the page list */
9fa3e853
FB
1190 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1191 if (phys_page2 != -1)
1192 tb_alloc_page(tb, 1, phys_page2);
1193 else
1194 tb->page_addr[1] = -1;
9fa3e853 1195
d4e8164f
FB
1196 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1197 tb->jmp_next[0] = NULL;
1198 tb->jmp_next[1] = NULL;
1199
1200 /* init original jump addresses */
1201 if (tb->tb_next_offset[0] != 0xffff)
1202 tb_reset_jump(tb, 0);
1203 if (tb->tb_next_offset[1] != 0xffff)
1204 tb_reset_jump(tb, 1);
8a40a180
FB
1205
1206#ifdef DEBUG_TB_CHECK
1207 tb_page_check();
1208#endif
c8a706fe 1209 mmap_unlock();
fd6ce8f6
FB
1210}
1211
9fa3e853
FB
1212/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1213 tb[1].tc_ptr. Return NULL if not found */
1214TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1215{
9fa3e853
FB
1216 int m_min, m_max, m;
1217 unsigned long v;
1218 TranslationBlock *tb;
a513fe19
FB
1219
1220 if (nb_tbs <= 0)
1221 return NULL;
1222 if (tc_ptr < (unsigned long)code_gen_buffer ||
1223 tc_ptr >= (unsigned long)code_gen_ptr)
1224 return NULL;
1225 /* binary search (cf Knuth) */
1226 m_min = 0;
1227 m_max = nb_tbs - 1;
1228 while (m_min <= m_max) {
1229 m = (m_min + m_max) >> 1;
1230 tb = &tbs[m];
1231 v = (unsigned long)tb->tc_ptr;
1232 if (v == tc_ptr)
1233 return tb;
1234 else if (tc_ptr < v) {
1235 m_max = m - 1;
1236 } else {
1237 m_min = m + 1;
1238 }
5fafdf24 1239 }
a513fe19
FB
1240 return &tbs[m_max];
1241}
7501267e 1242
ea041c0e
FB
1243static void tb_reset_jump_recursive(TranslationBlock *tb);
1244
1245static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1246{
1247 TranslationBlock *tb1, *tb_next, **ptb;
1248 unsigned int n1;
1249
1250 tb1 = tb->jmp_next[n];
1251 if (tb1 != NULL) {
1252 /* find head of list */
1253 for(;;) {
1254 n1 = (long)tb1 & 3;
1255 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1256 if (n1 == 2)
1257 break;
1258 tb1 = tb1->jmp_next[n1];
1259 }
1260 /* we are now sure that tb jumps to tb1 */
1261 tb_next = tb1;
1262
1263 /* remove tb from the jmp_first list */
1264 ptb = &tb_next->jmp_first;
1265 for(;;) {
1266 tb1 = *ptb;
1267 n1 = (long)tb1 & 3;
1268 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1269 if (n1 == n && tb1 == tb)
1270 break;
1271 ptb = &tb1->jmp_next[n1];
1272 }
1273 *ptb = tb->jmp_next[n];
1274 tb->jmp_next[n] = NULL;
3b46e624 1275
ea041c0e
FB
1276 /* suppress the jump to next tb in generated code */
1277 tb_reset_jump(tb, n);
1278
0124311e 1279 /* suppress jumps in the tb we could have jumped to */
ea041c0e
FB
1280 tb_reset_jump_recursive(tb_next);
1281 }
1282}
1283
1284static void tb_reset_jump_recursive(TranslationBlock *tb)
1285{
1286 tb_reset_jump_recursive2(tb, 0);
1287 tb_reset_jump_recursive2(tb, 1);
1288}
1289
1fddef4b 1290#if defined(TARGET_HAS_ICE)
d720b93d
FB
1291static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1292{
9b3c35e0
JM
1293 target_phys_addr_t addr;
1294 target_ulong pd;
c2f07f81
PB
1295 ram_addr_t ram_addr;
1296 PhysPageDesc *p;
d720b93d 1297
c2f07f81
PB
1298 addr = cpu_get_phys_page_debug(env, pc);
1299 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1300 if (!p) {
1301 pd = IO_MEM_UNASSIGNED;
1302 } else {
1303 pd = p->phys_offset;
1304 }
1305 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1306 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1307}
c27004ec 1308#endif
d720b93d 1309
6658ffb8 1310/* Add a watchpoint. */
a1d1bb31
AL
1311int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1312 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1313{
b4051334 1314 target_ulong len_mask = ~(len - 1);
c0ce998e 1315 CPUWatchpoint *wp;
6658ffb8 1316
b4051334
AL
1317 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1318 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1319 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1320 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1321 return -EINVAL;
1322 }
a1d1bb31
AL
1323 wp = qemu_malloc(sizeof(*wp));
1324 if (!wp)
426cd5d6 1325 return -ENOMEM;
a1d1bb31
AL
1326
1327 wp->vaddr = addr;
b4051334 1328 wp->len_mask = len_mask;
a1d1bb31
AL
1329 wp->flags = flags;
1330
2dc9f411 1331 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1332 if (flags & BP_GDB)
1333 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1334 else
1335 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1336
6658ffb8 1337 tlb_flush_page(env, addr);
a1d1bb31
AL
1338
1339 if (watchpoint)
1340 *watchpoint = wp;
1341 return 0;
6658ffb8
PB
1342}
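/* Example (a sketch, assuming the BP_GDB and BP_MEM_WRITE watchpoint flags):
   the gdb stub could request a 4-byte write watchpoint with

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0)
           return -EINVAL;

   len must be a power of two (1, 2, 4 or 8) and addr aligned to it, or the
   call fails with -EINVAL as checked above. */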
1343
a1d1bb31
AL
1344/* Remove a specific watchpoint. */
1345int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 int flags)
6658ffb8 1347{
b4051334 1348 target_ulong len_mask = ~(len - 1);
a1d1bb31 1349 CPUWatchpoint *wp;
6658ffb8 1350
c0ce998e 1351 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1352 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1353 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1354 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1355 return 0;
1356 }
1357 }
a1d1bb31 1358 return -ENOENT;
6658ffb8
PB
1359}
1360
a1d1bb31
AL
1361/* Remove a specific watchpoint by reference. */
1362void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1363{
c0ce998e 1364 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1365
a1d1bb31
AL
1366 tlb_flush_page(env, watchpoint->vaddr);
1367
1368 qemu_free(watchpoint);
1369}
1370
1371/* Remove all matching watchpoints. */
1372void cpu_watchpoint_remove_all(CPUState *env, int mask)
1373{
c0ce998e 1374 CPUWatchpoint *wp, *next;
a1d1bb31 1375
c0ce998e 1376 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1377 if (wp->flags & mask)
1378 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1379 }
7d03f82f
EI
1380}
1381
a1d1bb31
AL
1382/* Add a breakpoint. */
1383int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1384 CPUBreakpoint **breakpoint)
4c3a88a2 1385{
1fddef4b 1386#if defined(TARGET_HAS_ICE)
c0ce998e 1387 CPUBreakpoint *bp;
3b46e624 1388
a1d1bb31
AL
1389 bp = qemu_malloc(sizeof(*bp));
1390 if (!bp)
426cd5d6 1391 return -ENOMEM;
4c3a88a2 1392
a1d1bb31
AL
1393 bp->pc = pc;
1394 bp->flags = flags;
1395
2dc9f411 1396 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1397 if (flags & BP_GDB)
1398 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1399 else
1400 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1401
d720b93d 1402 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1403
1404 if (breakpoint)
1405 *breakpoint = bp;
4c3a88a2
FB
1406 return 0;
1407#else
a1d1bb31 1408 return -ENOSYS;
4c3a88a2
FB
1409#endif
1410}
1411
a1d1bb31
AL
1412/* Remove a specific breakpoint. */
1413int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1414{
7d03f82f 1415#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1416 CPUBreakpoint *bp;
1417
c0ce998e 1418 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1419 if (bp->pc == pc && bp->flags == flags) {
1420 cpu_breakpoint_remove_by_ref(env, bp);
1421 return 0;
1422 }
7d03f82f 1423 }
a1d1bb31
AL
1424 return -ENOENT;
1425#else
1426 return -ENOSYS;
7d03f82f
EI
1427#endif
1428}
1429
a1d1bb31
AL
1430/* Remove a specific breakpoint by reference. */
1431void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1432{
1fddef4b 1433#if defined(TARGET_HAS_ICE)
c0ce998e 1434 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1435
a1d1bb31
AL
1436 breakpoint_invalidate(env, breakpoint->pc);
1437
1438 qemu_free(breakpoint);
1439#endif
1440}
1441
1442/* Remove all matching breakpoints. */
1443void cpu_breakpoint_remove_all(CPUState *env, int mask)
1444{
1445#if defined(TARGET_HAS_ICE)
c0ce998e 1446 CPUBreakpoint *bp, *next;
a1d1bb31 1447
c0ce998e 1448 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1449 if (bp->flags & mask)
1450 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1451 }
4c3a88a2
FB
1452#endif
1453}
1454
c33a346e
FB
1455/* enable or disable single step mode. EXCP_DEBUG is returned by the
1456 CPU loop after each instruction */
1457void cpu_single_step(CPUState *env, int enabled)
1458{
1fddef4b 1459#if defined(TARGET_HAS_ICE)
c33a346e
FB
1460 if (env->singlestep_enabled != enabled) {
1461 env->singlestep_enabled = enabled;
1462 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1463 /* XXX: only flush what is necessary */
0124311e 1464 tb_flush(env);
c33a346e
FB
1465 }
1466#endif
1467}
1468
34865134
FB
1469/* enable or disable low-level logging */
1470void cpu_set_log(int log_flags)
1471{
1472 loglevel = log_flags;
1473 if (loglevel && !logfile) {
11fcfab4 1474 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1475 if (!logfile) {
1476 perror(logfilename);
1477 _exit(1);
1478 }
9fa3e853
FB
1479#if !defined(CONFIG_SOFTMMU)
1480 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1481 {
b55266b5 1482 static char logfile_buf[4096];
9fa3e853
FB
1483 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1484 }
1485#else
34865134 1486 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1487#endif
e735b91c
PB
1488 log_append = 1;
1489 }
1490 if (!loglevel && logfile) {
1491 fclose(logfile);
1492 logfile = NULL;
34865134
FB
1493 }
1494}
1495
1496void cpu_set_log_filename(const char *filename)
1497{
1498 logfilename = strdup(filename);
e735b91c
PB
1499 if (logfile) {
1500 fclose(logfile);
1501 logfile = NULL;
1502 }
1503 cpu_set_log(loglevel);
34865134 1504}
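/* Example (a sketch): redirecting the debug log and enabling two masks:

       cpu_set_log_filename("/tmp/qemu-trace.log");
       cpu_set_log(CPU_LOG_TB_IN_ASM | CPU_LOG_INT);

   cpu_set_log() reopens the file; the first open truncates it, later opens
   append because log_append is set after the first one. */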
c33a346e 1505
0124311e 1506/* mask must never be zero, except for A20 change call */
68a79315 1507void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1508{
d5975363 1509#if !defined(USE_NPTL)
ea041c0e 1510 TranslationBlock *tb;
15a51156 1511 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1512#endif
2e70f6ef 1513 int old_mask;
59817ccb 1514
2e70f6ef 1515 old_mask = env->interrupt_request;
d5975363 1516 /* FIXME: This is probably not threadsafe. A different thread could
bf20dc07 1517 be in the middle of a read-modify-write operation. */
68a79315 1518 env->interrupt_request |= mask;
d5975363
PB
1519#if defined(USE_NPTL)
1520 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1521 problem and hope the cpu will stop of its own accord. For userspace
1522 emulation this often isn't actually as bad as it sounds. Often
1523 signals are used primarily to interrupt blocking syscalls. */
1524#else
2e70f6ef 1525 if (use_icount) {
266910c4 1526 env->icount_decr.u16.high = 0xffff;
2e70f6ef
PB
1527#ifndef CONFIG_USER_ONLY
1528 /* CPU_INTERRUPT_EXIT isn't a real interrupt. It just means
1529 an async event happened and we need to process it. */
1530 if (!can_do_io(env)
1531 && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1532 cpu_abort(env, "Raised interrupt while not in I/O function");
1533 }
1534#endif
1535 } else {
1536 tb = env->current_tb;
1537 /* if the cpu is currently executing code, we must unlink it and
1538 all the potentially executing TBs */
1539 if (tb && !testandset(&interrupt_lock)) {
1540 env->current_tb = NULL;
1541 tb_reset_jump_recursive(tb);
1542 resetlock(&interrupt_lock);
1543 }
ea041c0e 1544 }
d5975363 1545#endif
ea041c0e
FB
1546}
1547
b54ad049
FB
1548void cpu_reset_interrupt(CPUState *env, int mask)
1549{
1550 env->interrupt_request &= ~mask;
1551}
1552
c7cd6a37 1553const CPULogItem cpu_log_items[] = {
5fafdf24 1554 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1555 "show generated host assembly code for each compiled TB" },
1556 { CPU_LOG_TB_IN_ASM, "in_asm",
1557 "show target assembly code for each compiled TB" },
5fafdf24 1558 { CPU_LOG_TB_OP, "op",
57fec1fe 1559 "show micro ops for each compiled TB" },
f193c797 1560 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1561 "show micro ops "
1562#ifdef TARGET_I386
1563 "before eflags optimization and "
f193c797 1564#endif
e01a1157 1565 "after liveness analysis" },
f193c797
FB
1566 { CPU_LOG_INT, "int",
1567 "show interrupts/exceptions in short format" },
1568 { CPU_LOG_EXEC, "exec",
1569 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1570 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1571 "show CPU state before block translation" },
f193c797
FB
1572#ifdef TARGET_I386
1573 { CPU_LOG_PCALL, "pcall",
1574 "show protected mode far calls/returns/exceptions" },
1575#endif
8e3a9fd2 1576#ifdef DEBUG_IOPORT
fd872598
FB
1577 { CPU_LOG_IOPORT, "ioport",
1578 "show all i/o ports accesses" },
8e3a9fd2 1579#endif
f193c797
FB
1580 { 0, NULL, NULL },
1581};
1582
1583static int cmp1(const char *s1, int n, const char *s2)
1584{
1585 if (strlen(s2) != n)
1586 return 0;
1587 return memcmp(s1, s2, n) == 0;
1588}
3b46e624 1589
f193c797
FB
1590/* takes a comma-separated list of log masks. Returns 0 on error. */
1591int cpu_str_to_log_mask(const char *str)
1592{
c7cd6a37 1593 const CPULogItem *item;
f193c797
FB
1594 int mask;
1595 const char *p, *p1;
1596
1597 p = str;
1598 mask = 0;
1599 for(;;) {
1600 p1 = strchr(p, ',');
1601 if (!p1)
1602 p1 = p + strlen(p);
8e3a9fd2
FB
1603 if(cmp1(p,p1-p,"all")) {
1604 for(item = cpu_log_items; item->mask != 0; item++) {
1605 mask |= item->mask;
1606 }
1607 } else {
f193c797
FB
1608 for(item = cpu_log_items; item->mask != 0; item++) {
1609 if (cmp1(p, p1 - p, item->name))
1610 goto found;
1611 }
1612 return 0;
8e3a9fd2 1613 }
f193c797
FB
1614 found:
1615 mask |= item->mask;
1616 if (*p1 != ',')
1617 break;
1618 p = p1 + 1;
1619 }
1620 return mask;
1621}
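/* Example (illustrative): cpu_str_to_log_mask("in_asm,cpu") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" ORs in every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */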
ea041c0e 1622
7501267e
FB
1623void cpu_abort(CPUState *env, const char *fmt, ...)
1624{
1625 va_list ap;
493ae1f0 1626 va_list ap2;
7501267e
FB
1627
1628 va_start(ap, fmt);
493ae1f0 1629 va_copy(ap2, ap);
7501267e
FB
1630 fprintf(stderr, "qemu: fatal: ");
1631 vfprintf(stderr, fmt, ap);
1632 fprintf(stderr, "\n");
1633#ifdef TARGET_I386
7fe48483
FB
1634 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1635#else
1636 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1637#endif
924edcae 1638 if (logfile) {
f9373291 1639 fprintf(logfile, "qemu: fatal: ");
493ae1f0 1640 vfprintf(logfile, fmt, ap2);
f9373291
JM
1641 fprintf(logfile, "\n");
1642#ifdef TARGET_I386
1643 cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1644#else
1645 cpu_dump_state(env, logfile, fprintf, 0);
1646#endif
924edcae
AZ
1647 fflush(logfile);
1648 fclose(logfile);
1649 }
493ae1f0 1650 va_end(ap2);
f9373291 1651 va_end(ap);
7501267e
FB
1652 abort();
1653}
1654
c5be9f08
TS
1655CPUState *cpu_copy(CPUState *env)
1656{
01ba9816 1657 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1658 /* preserve chaining and index */
1659 CPUState *next_cpu = new_env->next_cpu;
1660 int cpu_index = new_env->cpu_index;
1661 memcpy(new_env, env, sizeof(CPUState));
1662 new_env->next_cpu = next_cpu;
1663 new_env->cpu_index = cpu_index;
1664 return new_env;
1665}
1666
0124311e
FB
1667#if !defined(CONFIG_USER_ONLY)
1668
5c751e99
EI
1669static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1670{
1671 unsigned int i;
1672
1673 /* Discard jump cache entries for any tb which might potentially
1674 overlap the flushed page. */
1675 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1676 memset (&env->tb_jmp_cache[i], 0,
1677 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1678
1679 i = tb_jmp_cache_hash_page(addr);
1680 memset (&env->tb_jmp_cache[i], 0,
1681 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1682}
1683
ee8b7021
FB
1684/* NOTE: if flush_global is true, also flush global entries (not
1685 implemented yet) */
1686void tlb_flush(CPUState *env, int flush_global)
33417e70 1687{
33417e70 1688 int i;
0124311e 1689
9fa3e853
FB
1690#if defined(DEBUG_TLB)
1691 printf("tlb_flush:\n");
1692#endif
0124311e
FB
1693 /* must reset current TB so that interrupts cannot modify the
1694 links while we are modifying them */
1695 env->current_tb = NULL;
1696
33417e70 1697 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1698 env->tlb_table[0][i].addr_read = -1;
1699 env->tlb_table[0][i].addr_write = -1;
1700 env->tlb_table[0][i].addr_code = -1;
1701 env->tlb_table[1][i].addr_read = -1;
1702 env->tlb_table[1][i].addr_write = -1;
1703 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1704#if (NB_MMU_MODES >= 3)
1705 env->tlb_table[2][i].addr_read = -1;
1706 env->tlb_table[2][i].addr_write = -1;
1707 env->tlb_table[2][i].addr_code = -1;
1708#if (NB_MMU_MODES == 4)
1709 env->tlb_table[3][i].addr_read = -1;
1710 env->tlb_table[3][i].addr_write = -1;
1711 env->tlb_table[3][i].addr_code = -1;
1712#endif
1713#endif
33417e70 1714 }
9fa3e853 1715
8a40a180 1716 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1717
0a962c02
FB
1718#ifdef USE_KQEMU
1719 if (env->kqemu_enabled) {
1720 kqemu_flush(env, flush_global);
1721 }
9fa3e853 1722#endif
e3db7226 1723 tlb_flush_count++;
33417e70
FB
1724}
1725
274da6b2 1726static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1727{
5fafdf24 1728 if (addr == (tlb_entry->addr_read &
84b7b8e7 1729 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1730 addr == (tlb_entry->addr_write &
84b7b8e7 1731 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1732 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1733 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1734 tlb_entry->addr_read = -1;
1735 tlb_entry->addr_write = -1;
1736 tlb_entry->addr_code = -1;
1737 }
61382a50
FB
1738}
1739
2e12669a 1740void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1741{
8a40a180 1742 int i;
0124311e 1743
9fa3e853 1744#if defined(DEBUG_TLB)
108c49b8 1745 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1746#endif
0124311e
FB
1747 /* must reset current TB so that interrupts cannot modify the
1748 links while we are modifying them */
1749 env->current_tb = NULL;
61382a50
FB
1750
1751 addr &= TARGET_PAGE_MASK;
1752 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1753 tlb_flush_entry(&env->tlb_table[0][i], addr);
1754 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1755#if (NB_MMU_MODES >= 3)
1756 tlb_flush_entry(&env->tlb_table[2][i], addr);
1757#if (NB_MMU_MODES == 4)
1758 tlb_flush_entry(&env->tlb_table[3][i], addr);
1759#endif
1760#endif
0124311e 1761
5c751e99 1762 tlb_flush_jmp_cache(env, addr);
9fa3e853 1763
0a962c02
FB
1764#ifdef USE_KQEMU
1765 if (env->kqemu_enabled) {
1766 kqemu_flush_page(env, addr);
1767 }
1768#endif
9fa3e853
FB
1769}
1770
9fa3e853
FB
1771/* update the TLBs so that writes to code in the virtual page 'addr'
1772 can be detected */
6a00d601 1773static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1774{
5fafdf24 1775 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1776 ram_addr + TARGET_PAGE_SIZE,
1777 CODE_DIRTY_FLAG);
9fa3e853
FB
1778}
1779
9fa3e853 1780/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1781 tested for self modifying code */
5fafdf24 1782static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1783 target_ulong vaddr)
9fa3e853 1784{
3a7d929e 1785 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1786}
1787
5fafdf24 1788static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1789 unsigned long start, unsigned long length)
1790{
1791 unsigned long addr;
84b7b8e7
FB
1792 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1793 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1794 if ((addr - start) < length) {
0f459d16 1795 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1796 }
1797 }
1798}
1799
3a7d929e 1800void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1801 int dirty_flags)
1ccde1cb
FB
1802{
1803 CPUState *env;
4f2ac237 1804 unsigned long length, start1;
0a962c02
FB
1805 int i, mask, len;
1806 uint8_t *p;
1ccde1cb
FB
1807
1808 start &= TARGET_PAGE_MASK;
1809 end = TARGET_PAGE_ALIGN(end);
1810
1811 length = end - start;
1812 if (length == 0)
1813 return;
0a962c02 1814 len = length >> TARGET_PAGE_BITS;
3a7d929e 1815#ifdef USE_KQEMU
6a00d601
FB
1816 /* XXX: should not depend on cpu context */
1817 env = first_cpu;
3a7d929e 1818 if (env->kqemu_enabled) {
f23db169
FB
1819 ram_addr_t addr;
1820 addr = start;
1821 for(i = 0; i < len; i++) {
1822 kqemu_set_notdirty(env, addr);
1823 addr += TARGET_PAGE_SIZE;
1824 }
3a7d929e
FB
1825 }
1826#endif
f23db169
FB
1827 mask = ~dirty_flags;
1828 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1829 for(i = 0; i < len; i++)
1830 p[i] &= mask;
1831
1ccde1cb
FB
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
59817ccb 1834 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1835 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1836 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1837 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1838 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1839 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1840#if (NB_MMU_MODES >= 3)
1841 for(i = 0; i < CPU_TLB_SIZE; i++)
1842 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1843#if (NB_MMU_MODES == 4)
1844 for(i = 0; i < CPU_TLB_SIZE; i++)
1845 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1846#endif
1847#endif
6a00d601 1848 }
1ccde1cb
FB
1849}
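/* Editor's illustrative sketch (not in the original exec.c): a typical
   consumer of the dirty bitmap scans a RAM range, handles the pages that
   were written since the last scan, then clears the flag again with
   cpu_physical_memory_reset_dirty().  VGA_DIRTY_FLAG and
   cpu_physical_memory_get_dirty() are assumed from cpu-all.h of this era. */
static void example_update_display(ram_addr_t fb_offset, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_offset; addr < fb_offset + fb_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this guest page ... */
        }
    }
    /* let the TLB fast path mark these pages dirty again on the next write */
    cpu_physical_memory_reset_dirty(fb_offset, fb_offset + fb_size,
                                    VGA_DIRTY_FLAG);
}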
1850
74576198
AL
1851int cpu_physical_memory_set_dirty_tracking(int enable)
1852{
1853 in_migration = enable;
1854 return 0;
1855}
1856
1857int cpu_physical_memory_get_dirty_tracking(void)
1858{
1859 return in_migration;
1860}
1861
2bec46dc
AL
1862void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1863{
1864 if (kvm_enabled())
1865 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1866}
1867
3a7d929e
FB
1868static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1869{
1870 ram_addr_t ram_addr;
1871
84b7b8e7 1872 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1873 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1874 tlb_entry->addend - (unsigned long)phys_ram_base;
1875 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1876 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1877 }
1878 }
1879}
1880
1881/* update the TLB according to the current state of the dirty bits */
1882void cpu_tlb_update_dirty(CPUState *env)
1883{
1884 int i;
1885 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1886 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1887 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1888 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1889#if (NB_MMU_MODES >= 3)
1890 for(i = 0; i < CPU_TLB_SIZE; i++)
1891 tlb_update_dirty(&env->tlb_table[2][i]);
1892#if (NB_MMU_MODES == 4)
1893 for(i = 0; i < CPU_TLB_SIZE; i++)
1894 tlb_update_dirty(&env->tlb_table[3][i]);
1895#endif
1896#endif
3a7d929e
FB
1897}
1898
0f459d16 1899static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1900{
0f459d16
PB
1901 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1902 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1903}
1904
0f459d16
PB
1905/* update the TLB corresponding to virtual page vaddr
1906 so that it is no longer dirty */
1907static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1908{
1ccde1cb
FB
1909 int i;
1910
0f459d16 1911 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1912 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1913 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1914 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1915#if (NB_MMU_MODES >= 3)
0f459d16 1916 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1917#if (NB_MMU_MODES == 4)
0f459d16 1918 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1919#endif
1920#endif
9fa3e853
FB
1921}
1922
59817ccb
FB
1923/* add a new TLB entry. At most one entry for a given virtual address
1924 is permitted. Return 0 if OK or 2 if the page could not be mapped
 1925 (can only happen in non-SOFTMMU mode for I/O pages or pages
1926 conflicting with the host address space). */
5fafdf24
TS
1927int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1928 target_phys_addr_t paddr, int prot,
6ebbf390 1929 int mmu_idx, int is_softmmu)
9fa3e853 1930{
92e873b9 1931 PhysPageDesc *p;
4f2ac237 1932 unsigned long pd;
9fa3e853 1933 unsigned int index;
4f2ac237 1934 target_ulong address;
0f459d16 1935 target_ulong code_address;
108c49b8 1936 target_phys_addr_t addend;
9fa3e853 1937 int ret;
84b7b8e7 1938 CPUTLBEntry *te;
a1d1bb31 1939 CPUWatchpoint *wp;
0f459d16 1940 target_phys_addr_t iotlb;
9fa3e853 1941
92e873b9 1942 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1943 if (!p) {
1944 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1945 } else {
1946 pd = p->phys_offset;
9fa3e853
FB
1947 }
1948#if defined(DEBUG_TLB)
6ebbf390
JM
1949 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1950 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1951#endif
1952
1953 ret = 0;
0f459d16
PB
1954 address = vaddr;
1955 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1956 /* IO memory case (romd handled later) */
1957 address |= TLB_MMIO;
1958 }
1959 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1960 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1961 /* Normal RAM. */
1962 iotlb = pd & TARGET_PAGE_MASK;
1963 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1964 iotlb |= IO_MEM_NOTDIRTY;
1965 else
1966 iotlb |= IO_MEM_ROM;
1967 } else {
 1968 /* IO handlers are currently passed a physical address.
1969 It would be nice to pass an offset from the base address
1970 of that region. This would avoid having to special case RAM,
1971 and avoid full address decoding in every device.
1972 We can't use the high bits of pd for this because
1973 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1974 iotlb = (pd & ~TARGET_PAGE_MASK);
1975 if (p) {
1976 /* FIXME: What if this isn't page aligned? */
1977 iotlb += p->region_offset;
1978 } else {
1979 iotlb += paddr;
1980 }
0f459d16
PB
1981 }
1982
1983 code_address = address;
1984 /* Make accesses to pages with watchpoints go via the
1985 watchpoint trap routines. */
c0ce998e 1986 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 1987 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
1988 iotlb = io_mem_watch + paddr;
1989 /* TODO: The memory case can be optimized by not trapping
1990 reads of pages with a write breakpoint. */
1991 address |= TLB_MMIO;
6658ffb8 1992 }
0f459d16 1993 }
d79acba4 1994
0f459d16
PB
1995 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1996 env->iotlb[mmu_idx][index] = iotlb - vaddr;
1997 te = &env->tlb_table[mmu_idx][index];
1998 te->addend = addend - vaddr;
1999 if (prot & PAGE_READ) {
2000 te->addr_read = address;
2001 } else {
2002 te->addr_read = -1;
2003 }
5c751e99 2004
0f459d16
PB
2005 if (prot & PAGE_EXEC) {
2006 te->addr_code = code_address;
2007 } else {
2008 te->addr_code = -1;
2009 }
2010 if (prot & PAGE_WRITE) {
2011 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2012 (pd & IO_MEM_ROMD)) {
2013 /* Write access calls the I/O callback. */
2014 te->addr_write = address | TLB_MMIO;
2015 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2016 !cpu_physical_memory_is_dirty(pd)) {
2017 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2018 } else {
0f459d16 2019 te->addr_write = address;
9fa3e853 2020 }
0f459d16
PB
2021 } else {
2022 te->addr_write = -1;
9fa3e853 2023 }
9fa3e853
FB
2024 return ret;
2025}
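/* Editor's illustrative sketch (not in the original exec.c): the per-target
   MMU fault handlers resolve vaddr to paddr from the guest page tables and
   then install the mapping with tlb_set_page_exec().  The translation step
   is target specific and only hinted at here. */
static int example_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                                    int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* ... walk the guest page tables, filling in paddr and prot ... */
    paddr = vaddr;                      /* identity mapping, for the sketch */
    prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK,
                             prot, mmu_idx, is_softmmu);
}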
2026
0124311e
FB
2027#else
2028
ee8b7021 2029void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2030{
2031}
2032
2e12669a 2033void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2034{
2035}
2036
5fafdf24
TS
2037int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2038 target_phys_addr_t paddr, int prot,
6ebbf390 2039 int mmu_idx, int is_softmmu)
9fa3e853
FB
2040{
2041 return 0;
2042}
0124311e 2043
9fa3e853
FB
2044/* dump memory mappings */
2045void page_dump(FILE *f)
33417e70 2046{
9fa3e853
FB
2047 unsigned long start, end;
2048 int i, j, prot, prot1;
2049 PageDesc *p;
33417e70 2050
9fa3e853
FB
2051 fprintf(f, "%-8s %-8s %-8s %s\n",
2052 "start", "end", "size", "prot");
2053 start = -1;
2054 end = -1;
2055 prot = 0;
2056 for(i = 0; i <= L1_SIZE; i++) {
2057 if (i < L1_SIZE)
2058 p = l1_map[i];
2059 else
2060 p = NULL;
2061 for(j = 0;j < L2_SIZE; j++) {
2062 if (!p)
2063 prot1 = 0;
2064 else
2065 prot1 = p[j].flags;
2066 if (prot1 != prot) {
2067 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2068 if (start != -1) {
2069 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2070 start, end, end - start,
9fa3e853
FB
2071 prot & PAGE_READ ? 'r' : '-',
2072 prot & PAGE_WRITE ? 'w' : '-',
2073 prot & PAGE_EXEC ? 'x' : '-');
2074 }
2075 if (prot1 != 0)
2076 start = end;
2077 else
2078 start = -1;
2079 prot = prot1;
2080 }
2081 if (!p)
2082 break;
2083 }
33417e70 2084 }
33417e70
FB
2085}
2086
53a5960a 2087int page_get_flags(target_ulong address)
33417e70 2088{
9fa3e853
FB
2089 PageDesc *p;
2090
2091 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2092 if (!p)
9fa3e853
FB
2093 return 0;
2094 return p->flags;
2095}
2096
2097/* modify the flags of a page and invalidate the code if
 2098 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2099 depending on PAGE_WRITE */
53a5960a 2100void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2101{
2102 PageDesc *p;
53a5960a 2103 target_ulong addr;
9fa3e853 2104
c8a706fe 2105 /* mmap_lock should already be held. */
9fa3e853
FB
2106 start = start & TARGET_PAGE_MASK;
2107 end = TARGET_PAGE_ALIGN(end);
2108 if (flags & PAGE_WRITE)
2109 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2110 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2111 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2112 /* We may be called for host regions that are outside guest
2113 address space. */
2114 if (!p)
2115 return;
9fa3e853
FB
2116 /* if the write protection is set, then we invalidate the code
2117 inside */
5fafdf24 2118 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2119 (flags & PAGE_WRITE) &&
2120 p->first_tb) {
d720b93d 2121 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2122 }
2123 p->flags = flags;
2124 }
33417e70
FB
2125}
2126
3d97b40b
TS
2127int page_check_range(target_ulong start, target_ulong len, int flags)
2128{
2129 PageDesc *p;
2130 target_ulong end;
2131 target_ulong addr;
2132
55f280c9
AZ
2133 if (start + len < start)
2134 /* we've wrapped around */
2135 return -1;
2136
3d97b40b
TS
 2137 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2138 start = start & TARGET_PAGE_MASK;
2139
3d97b40b
TS
2140 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2141 p = page_find(addr >> TARGET_PAGE_BITS);
2142 if( !p )
2143 return -1;
2144 if( !(p->flags & PAGE_VALID) )
2145 return -1;
2146
dae3270c 2147 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2148 return -1;
dae3270c
FB
2149 if (flags & PAGE_WRITE) {
2150 if (!(p->flags & PAGE_WRITE_ORG))
2151 return -1;
2152 /* unprotect the page if it was put read-only because it
2153 contains translated code */
2154 if (!(p->flags & PAGE_WRITE)) {
2155 if (!page_unprotect(addr, 0, NULL))
2156 return -1;
2157 }
2158 return 0;
2159 }
3d97b40b
TS
2160 }
2161 return 0;
2162}
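/* Editor's illustrative sketch (not in the original exec.c): user-mode
   emulation validates a guest buffer before copying from it.  linux-user
   normally goes through its access_ok()/lock_user() wrappers; the direct
   call is shown only to make the flag semantics concrete. */
static int example_guest_buffer_readable(target_ulong guest_addr,
                                         target_ulong len)
{
    /* returns 0 when every page in [guest_addr, guest_addr + len) is
       mapped with PAGE_READ, -1 otherwise */
    return page_check_range(guest_addr, len, PAGE_READ);
}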
2163
9fa3e853
FB
2164/* called from signal handler: invalidate the code and unprotect the
 2165 page. Return TRUE if the fault was successfully handled. */
53a5960a 2166int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2167{
2168 unsigned int page_index, prot, pindex;
2169 PageDesc *p, *p1;
53a5960a 2170 target_ulong host_start, host_end, addr;
9fa3e853 2171
c8a706fe
PB
2172 /* Technically this isn't safe inside a signal handler. However we
2173 know this only ever happens in a synchronous SEGV handler, so in
2174 practice it seems to be ok. */
2175 mmap_lock();
2176
83fb7adf 2177 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2178 page_index = host_start >> TARGET_PAGE_BITS;
2179 p1 = page_find(page_index);
c8a706fe
PB
2180 if (!p1) {
2181 mmap_unlock();
9fa3e853 2182 return 0;
c8a706fe 2183 }
83fb7adf 2184 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2185 p = p1;
2186 prot = 0;
2187 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2188 prot |= p->flags;
2189 p++;
2190 }
2191 /* if the page was really writable, then we change its
2192 protection back to writable */
2193 if (prot & PAGE_WRITE_ORG) {
2194 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2195 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2196 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2197 (prot & PAGE_BITS) | PAGE_WRITE);
2198 p1[pindex].flags |= PAGE_WRITE;
2199 /* and since the content will be modified, we must invalidate
2200 the corresponding translated code. */
d720b93d 2201 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2202#ifdef DEBUG_TB_CHECK
2203 tb_invalidate_check(address);
2204#endif
c8a706fe 2205 mmap_unlock();
9fa3e853
FB
2206 return 1;
2207 }
2208 }
c8a706fe 2209 mmap_unlock();
9fa3e853
FB
2210 return 0;
2211}
2212
6a00d601
FB
2213static inline void tlb_set_dirty(CPUState *env,
2214 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2215{
2216}
9fa3e853
FB
2217#endif /* defined(CONFIG_USER_ONLY) */
2218
e2eef170 2219#if !defined(CONFIG_USER_ONLY)
8da3ff18 2220
db7b5426 2221static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2222 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2223static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2224 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2225#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2226 need_subpage) \
2227 do { \
2228 if (addr > start_addr) \
2229 start_addr2 = 0; \
2230 else { \
2231 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2232 if (start_addr2 > 0) \
2233 need_subpage = 1; \
2234 } \
2235 \
49e9fba2 2236 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2237 end_addr2 = TARGET_PAGE_SIZE - 1; \
2238 else { \
2239 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2240 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2241 need_subpage = 1; \
2242 } \
2243 } while (0)
2244
33417e70
FB
2245/* register physical memory. 'size' must be a multiple of the target
2246 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2247 io memory page. The address used when calling the IO function is
2248 the offset from the start of the region, plus region_offset. Both
 2249 start_addr and region_offset are rounded down to a page boundary
2250 before calculating this offset. This should not be a problem unless
2251 the low bits of start_addr and region_offset differ. */
2252void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2253 ram_addr_t size,
2254 ram_addr_t phys_offset,
2255 ram_addr_t region_offset)
33417e70 2256{
108c49b8 2257 target_phys_addr_t addr, end_addr;
92e873b9 2258 PhysPageDesc *p;
9d42037b 2259 CPUState *env;
00f82b8a 2260 ram_addr_t orig_size = size;
db7b5426 2261 void *subpage;
33417e70 2262
da260249
FB
2263#ifdef USE_KQEMU
2264 /* XXX: should not depend on cpu context */
2265 env = first_cpu;
2266 if (env->kqemu_enabled) {
2267 kqemu_set_phys_mem(start_addr, size, phys_offset);
2268 }
2269#endif
7ba1e619
AL
2270 if (kvm_enabled())
2271 kvm_set_phys_mem(start_addr, size, phys_offset);
2272
8da3ff18 2273 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2274 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2275 end_addr = start_addr + (target_phys_addr_t)size;
2276 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2277 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2278 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2279 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2280 target_phys_addr_t start_addr2, end_addr2;
2281 int need_subpage = 0;
2282
2283 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2284 need_subpage);
4254fab8 2285 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2286 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2287 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2288 &p->phys_offset, orig_memory,
2289 p->region_offset);
db7b5426
BS
2290 } else {
2291 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2292 >> IO_MEM_SHIFT];
2293 }
8da3ff18
PB
2294 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2295 region_offset);
2296 p->region_offset = 0;
db7b5426
BS
2297 } else {
2298 p->phys_offset = phys_offset;
2299 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2300 (phys_offset & IO_MEM_ROMD))
2301 phys_offset += TARGET_PAGE_SIZE;
2302 }
2303 } else {
2304 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2305 p->phys_offset = phys_offset;
8da3ff18 2306 p->region_offset = region_offset;
db7b5426 2307 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2308 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2309 phys_offset += TARGET_PAGE_SIZE;
8da3ff18 2310 } else {
db7b5426
BS
2311 target_phys_addr_t start_addr2, end_addr2;
2312 int need_subpage = 0;
2313
2314 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2315 end_addr2, need_subpage);
2316
4254fab8 2317 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2318 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2319 &p->phys_offset, IO_MEM_UNASSIGNED,
2320 0);
db7b5426 2321 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2322 phys_offset, region_offset);
2323 p->region_offset = 0;
db7b5426
BS
2324 }
2325 }
2326 }
8da3ff18 2327 region_offset += TARGET_PAGE_SIZE;
33417e70 2328 }
3b46e624 2329
9d42037b
FB
2330 /* since each CPU stores ram addresses in its TLB cache, we must
2331 reset the modified entries */
2332 /* XXX: slow ! */
2333 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2334 tlb_flush(env, 1);
2335 }
33417e70
FB
2336}
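/* Editor's illustrative sketch (not in the original exec.c): board setup
   code usually pairs qemu_ram_alloc() with cpu_register_physical_memory()
   (the wrapper that passes region_offset == 0) to map guest RAM, and tags
   read-only regions with IO_MEM_ROM.  The base addresses are made up. */
static void example_map_board_memory(ram_addr_t ram_size)
{
    ram_addr_t ram_offset, rom_offset;

    ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0x00000000, ram_size,
                                 ram_offset | IO_MEM_RAM);

    rom_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory(0xfffe0000, 0x10000,
                                 rom_offset | IO_MEM_ROM);
}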
2337
ba863458 2338/* XXX: temporary until new memory mapping API */
00f82b8a 2339ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2340{
2341 PhysPageDesc *p;
2342
2343 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2344 if (!p)
2345 return IO_MEM_UNASSIGNED;
2346 return p->phys_offset;
2347}
2348
e9a1ab19 2349/* XXX: better than nothing */
00f82b8a 2350ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2351{
2352 ram_addr_t addr;
7fb4fdcf 2353 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2354 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2355 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2356 abort();
2357 }
2358 addr = phys_ram_alloc_offset;
2359 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2360 return addr;
2361}
2362
2363void qemu_ram_free(ram_addr_t addr)
2364{
2365}
2366
a4193c8a 2367static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2368{
67d3b957 2369#ifdef DEBUG_UNASSIGNED
ab3d1727 2370 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2371#endif
e18231a3
BS
2372#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373 do_unassigned_access(addr, 0, 0, 0, 1);
2374#endif
2375 return 0;
2376}
2377
2378static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2379{
2380#ifdef DEBUG_UNASSIGNED
2381 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2382#endif
2383#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2384 do_unassigned_access(addr, 0, 0, 0, 2);
2385#endif
2386 return 0;
2387}
2388
2389static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2390{
2391#ifdef DEBUG_UNASSIGNED
2392 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2393#endif
2394#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2395 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2396#endif
33417e70
FB
2397 return 0;
2398}
2399
a4193c8a 2400static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2401{
67d3b957 2402#ifdef DEBUG_UNASSIGNED
ab3d1727 2403 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2404#endif
e18231a3
BS
2405#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2406 do_unassigned_access(addr, 1, 0, 0, 1);
2407#endif
2408}
2409
2410static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2411{
2412#ifdef DEBUG_UNASSIGNED
2413 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2414#endif
2415#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2416 do_unassigned_access(addr, 1, 0, 0, 2);
2417#endif
2418}
2419
2420static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2421{
2422#ifdef DEBUG_UNASSIGNED
2423 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2424#endif
2425#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2426 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2427#endif
33417e70
FB
2428}
2429
2430static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2431 unassigned_mem_readb,
e18231a3
BS
2432 unassigned_mem_readw,
2433 unassigned_mem_readl,
33417e70
FB
2434};
2435
2436static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2437 unassigned_mem_writeb,
e18231a3
BS
2438 unassigned_mem_writew,
2439 unassigned_mem_writel,
33417e70
FB
2440};
2441
0f459d16
PB
2442static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2443 uint32_t val)
9fa3e853 2444{
3a7d929e 2445 int dirty_flags;
3a7d929e
FB
2446 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2447 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2448#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2449 tb_invalidate_phys_page_fast(ram_addr, 1);
2450 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2451#endif
3a7d929e 2452 }
0f459d16 2453 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2454#ifdef USE_KQEMU
2455 if (cpu_single_env->kqemu_enabled &&
2456 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2457 kqemu_modify_page(cpu_single_env, ram_addr);
2458#endif
f23db169
FB
2459 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2460 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2461 /* we remove the notdirty callback only if the code has been
2462 flushed */
2463 if (dirty_flags == 0xff)
2e70f6ef 2464 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2465}
2466
0f459d16
PB
2467static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2468 uint32_t val)
9fa3e853 2469{
3a7d929e 2470 int dirty_flags;
3a7d929e
FB
2471 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2472 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2473#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2474 tb_invalidate_phys_page_fast(ram_addr, 2);
2475 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2476#endif
3a7d929e 2477 }
0f459d16 2478 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2479#ifdef USE_KQEMU
2480 if (cpu_single_env->kqemu_enabled &&
2481 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2482 kqemu_modify_page(cpu_single_env, ram_addr);
2483#endif
f23db169
FB
2484 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2485 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2486 /* we remove the notdirty callback only if the code has been
2487 flushed */
2488 if (dirty_flags == 0xff)
2e70f6ef 2489 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2490}
2491
0f459d16
PB
2492static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2493 uint32_t val)
9fa3e853 2494{
3a7d929e 2495 int dirty_flags;
3a7d929e
FB
2496 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2497 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2498#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2499 tb_invalidate_phys_page_fast(ram_addr, 4);
2500 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2501#endif
3a7d929e 2502 }
0f459d16 2503 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2504#ifdef USE_KQEMU
2505 if (cpu_single_env->kqemu_enabled &&
2506 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2507 kqemu_modify_page(cpu_single_env, ram_addr);
2508#endif
f23db169
FB
2509 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2510 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2511 /* we remove the notdirty callback only if the code has been
2512 flushed */
2513 if (dirty_flags == 0xff)
2e70f6ef 2514 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2515}
2516
3a7d929e 2517static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2518 NULL, /* never used */
2519 NULL, /* never used */
2520 NULL, /* never used */
2521};
2522
1ccde1cb
FB
2523static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2524 notdirty_mem_writeb,
2525 notdirty_mem_writew,
2526 notdirty_mem_writel,
2527};
2528
0f459d16 2529/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2530static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2531{
2532 CPUState *env = cpu_single_env;
06d55cc1
AL
2533 target_ulong pc, cs_base;
2534 TranslationBlock *tb;
0f459d16 2535 target_ulong vaddr;
a1d1bb31 2536 CPUWatchpoint *wp;
06d55cc1 2537 int cpu_flags;
0f459d16 2538
06d55cc1
AL
2539 if (env->watchpoint_hit) {
2540 /* We re-entered the check after replacing the TB. Now raise
 2541 * the debug interrupt so that it will trigger after the
2542 * current instruction. */
2543 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2544 return;
2545 }
2e70f6ef 2546 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2547 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2548 if ((vaddr == (wp->vaddr & len_mask) ||
2549 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2550 wp->flags |= BP_WATCHPOINT_HIT;
2551 if (!env->watchpoint_hit) {
2552 env->watchpoint_hit = wp;
2553 tb = tb_find_pc(env->mem_io_pc);
2554 if (!tb) {
2555 cpu_abort(env, "check_watchpoint: could not find TB for "
2556 "pc=%p", (void *)env->mem_io_pc);
2557 }
2558 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2559 tb_phys_invalidate(tb, -1);
2560 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2561 env->exception_index = EXCP_DEBUG;
2562 } else {
2563 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2564 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2565 }
2566 cpu_resume_from_signal(env, NULL);
06d55cc1 2567 }
6e140f28
AL
2568 } else {
2569 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2570 }
2571 }
2572}
2573
6658ffb8
PB
2574/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2575 so these check for a hit then pass through to the normal out-of-line
2576 phys routines. */
2577static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2578{
b4051334 2579 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2580 return ldub_phys(addr);
2581}
2582
2583static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2584{
b4051334 2585 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2586 return lduw_phys(addr);
2587}
2588
2589static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2590{
b4051334 2591 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2592 return ldl_phys(addr);
2593}
2594
6658ffb8
PB
2595static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2596 uint32_t val)
2597{
b4051334 2598 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2599 stb_phys(addr, val);
2600}
2601
2602static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2603 uint32_t val)
2604{
b4051334 2605 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2606 stw_phys(addr, val);
2607}
2608
2609static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2610 uint32_t val)
2611{
b4051334 2612 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2613 stl_phys(addr, val);
2614}
2615
2616static CPUReadMemoryFunc *watch_mem_read[3] = {
2617 watch_mem_readb,
2618 watch_mem_readw,
2619 watch_mem_readl,
2620};
2621
2622static CPUWriteMemoryFunc *watch_mem_write[3] = {
2623 watch_mem_writeb,
2624 watch_mem_writew,
2625 watch_mem_writel,
2626};
6658ffb8 2627
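/* Editor's illustrative sketch (not in the original exec.c): the gdb stub
   and target debug code arm watchpoints with cpu_watchpoint_insert();
   tlb_set_page_exec() then marks the containing page TLB_MMIO and points
   its iotlb entry at io_mem_watch, so the routines above run on every
   access.  The exact prototype and flag names are assumed from the
   break/watchpoint API of this era. */
static int example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}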
db7b5426
BS
2628static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2629 unsigned int len)
2630{
db7b5426
BS
2631 uint32_t ret;
2632 unsigned int idx;
2633
8da3ff18 2634 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2635#if defined(DEBUG_SUBPAGE)
2636 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2637 mmio, len, addr, idx);
2638#endif
8da3ff18
PB
2639 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2640 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2641
2642 return ret;
2643}
2644
2645static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2646 uint32_t value, unsigned int len)
2647{
db7b5426
BS
2648 unsigned int idx;
2649
8da3ff18 2650 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2651#if defined(DEBUG_SUBPAGE)
2652 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2653 mmio, len, addr, idx, value);
2654#endif
8da3ff18
PB
2655 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2656 addr + mmio->region_offset[idx][1][len],
2657 value);
db7b5426
BS
2658}
2659
2660static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2661{
2662#if defined(DEBUG_SUBPAGE)
2663 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2664#endif
2665
2666 return subpage_readlen(opaque, addr, 0);
2667}
2668
2669static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2670 uint32_t value)
2671{
2672#if defined(DEBUG_SUBPAGE)
2673 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2674#endif
2675 subpage_writelen(opaque, addr, value, 0);
2676}
2677
2678static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2679{
2680#if defined(DEBUG_SUBPAGE)
2681 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2682#endif
2683
2684 return subpage_readlen(opaque, addr, 1);
2685}
2686
2687static void subpage_writew (void *opaque, target_phys_addr_t addr,
2688 uint32_t value)
2689{
2690#if defined(DEBUG_SUBPAGE)
2691 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2692#endif
2693 subpage_writelen(opaque, addr, value, 1);
2694}
2695
2696static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2697{
2698#if defined(DEBUG_SUBPAGE)
2699 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2700#endif
2701
2702 return subpage_readlen(opaque, addr, 2);
2703}
2704
2705static void subpage_writel (void *opaque,
2706 target_phys_addr_t addr, uint32_t value)
2707{
2708#if defined(DEBUG_SUBPAGE)
2709 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2710#endif
2711 subpage_writelen(opaque, addr, value, 2);
2712}
2713
2714static CPUReadMemoryFunc *subpage_read[] = {
2715 &subpage_readb,
2716 &subpage_readw,
2717 &subpage_readl,
2718};
2719
2720static CPUWriteMemoryFunc *subpage_write[] = {
2721 &subpage_writeb,
2722 &subpage_writew,
2723 &subpage_writel,
2724};
2725
2726static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2727 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2728{
2729 int idx, eidx;
4254fab8 2730 unsigned int i;
db7b5426
BS
2731
2732 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2733 return -1;
2734 idx = SUBPAGE_IDX(start);
2735 eidx = SUBPAGE_IDX(end);
2736#if defined(DEBUG_SUBPAGE)
2737 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2738 mmio, start, end, idx, eidx, memory);
2739#endif
2740 memory >>= IO_MEM_SHIFT;
2741 for (; idx <= eidx; idx++) {
4254fab8 2742 for (i = 0; i < 4; i++) {
3ee89922
BS
2743 if (io_mem_read[memory][i]) {
2744 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2745 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2746 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2747 }
2748 if (io_mem_write[memory][i]) {
2749 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2750 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2751 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2752 }
4254fab8 2753 }
db7b5426
BS
2754 }
2755
2756 return 0;
2757}
2758
00f82b8a 2759static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2760 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2761{
2762 subpage_t *mmio;
2763 int subpage_memory;
2764
2765 mmio = qemu_mallocz(sizeof(subpage_t));
2766 if (mmio != NULL) {
2767 mmio->base = base;
2768 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2769#if defined(DEBUG_SUBPAGE)
2770 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2771 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2772#endif
2773 *phys = subpage_memory | IO_MEM_SUBPAGE;
8da3ff18
PB
2774 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2775 region_offset);
db7b5426
BS
2776 }
2777
2778 return mmio;
2779}
2780
33417e70
FB
2781static void io_mem_init(void)
2782{
3a7d929e 2783 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2784 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2785 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
2786 io_mem_nb = 5;
2787
0f459d16 2788 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2789 watch_mem_write, NULL);
1ccde1cb 2790 /* alloc dirty bits array */
0a962c02 2791 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2792 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2793}
2794
2795/* mem_read and mem_write are arrays of functions containing the
2796 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2797 2). Functions can be omitted with a NULL function pointer. The
2798 registered functions may be modified dynamically later.
 2799 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
2800 modified. If it is zero, a new io zone is allocated. The return
2801 value can be used with cpu_register_physical_memory(). (-1) is
 2802 returned on error. */
33417e70
FB
2803int cpu_register_io_memory(int io_index,
2804 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2805 CPUWriteMemoryFunc **mem_write,
2806 void *opaque)
33417e70 2807{
4254fab8 2808 int i, subwidth = 0;
33417e70
FB
2809
2810 if (io_index <= 0) {
b5ff1b31 2811 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
2812 return -1;
2813 io_index = io_mem_nb++;
2814 } else {
2815 if (io_index >= IO_MEM_NB_ENTRIES)
2816 return -1;
2817 }
b5ff1b31 2818
33417e70 2819 for(i = 0;i < 3; i++) {
4254fab8
BS
2820 if (!mem_read[i] || !mem_write[i])
2821 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2822 io_mem_read[io_index][i] = mem_read[i];
2823 io_mem_write[io_index][i] = mem_write[i];
2824 }
a4193c8a 2825 io_mem_opaque[io_index] = opaque;
4254fab8 2826 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2827}
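/* Editor's illustrative sketch (not in the original exec.c): a device model
   registers its MMIO callbacks and then maps the returned token over a
   physical range.  "exdev" and its handlers are hypothetical; the NULL
   entries rely on the subwidth handling described in the comment above. */
static uint32_t exdev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                           /* a real device decodes addr here */
}

static void exdev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *exdev_read[3] = {
    NULL, NULL, exdev_readl,            /* 32-bit access only */
};

static CPUWriteMemoryFunc *exdev_write[3] = {
    NULL, NULL, exdev_writel,
};

static void exdev_map(target_phys_addr_t base, void *opaque)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, exdev_read, exdev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}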
61382a50 2828
8926b517
FB
2829CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2830{
2831 return io_mem_write[io_index >> IO_MEM_SHIFT];
2832}
2833
2834CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2835{
2836 return io_mem_read[io_index >> IO_MEM_SHIFT];
2837}
2838
e2eef170
PB
2839#endif /* !defined(CONFIG_USER_ONLY) */
2840
13eb76e0
FB
2841/* physical memory access (slow version, mainly for debug) */
2842#if defined(CONFIG_USER_ONLY)
5fafdf24 2843void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2844 int len, int is_write)
2845{
2846 int l, flags;
2847 target_ulong page;
53a5960a 2848 void * p;
13eb76e0
FB
2849
2850 while (len > 0) {
2851 page = addr & TARGET_PAGE_MASK;
2852 l = (page + TARGET_PAGE_SIZE) - addr;
2853 if (l > len)
2854 l = len;
2855 flags = page_get_flags(page);
2856 if (!(flags & PAGE_VALID))
2857 return;
2858 if (is_write) {
2859 if (!(flags & PAGE_WRITE))
2860 return;
579a97f7 2861 /* XXX: this code should not depend on lock_user */
72fb7daa 2862 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2863 /* FIXME - should this return an error rather than just fail? */
2864 return;
72fb7daa
AJ
2865 memcpy(p, buf, l);
2866 unlock_user(p, addr, l);
13eb76e0
FB
2867 } else {
2868 if (!(flags & PAGE_READ))
2869 return;
579a97f7 2870 /* XXX: this code should not depend on lock_user */
72fb7daa 2871 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2872 /* FIXME - should this return an error rather than just fail? */
2873 return;
72fb7daa 2874 memcpy(buf, p, l);
5b257578 2875 unlock_user(p, addr, 0);
13eb76e0
FB
2876 }
2877 len -= l;
2878 buf += l;
2879 addr += l;
2880 }
2881}
8df1cd07 2882
13eb76e0 2883#else
5fafdf24 2884void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2885 int len, int is_write)
2886{
2887 int l, io_index;
2888 uint8_t *ptr;
2889 uint32_t val;
2e12669a
FB
2890 target_phys_addr_t page;
2891 unsigned long pd;
92e873b9 2892 PhysPageDesc *p;
3b46e624 2893
13eb76e0
FB
2894 while (len > 0) {
2895 page = addr & TARGET_PAGE_MASK;
2896 l = (page + TARGET_PAGE_SIZE) - addr;
2897 if (l > len)
2898 l = len;
92e873b9 2899 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2900 if (!p) {
2901 pd = IO_MEM_UNASSIGNED;
2902 } else {
2903 pd = p->phys_offset;
2904 }
3b46e624 2905
13eb76e0 2906 if (is_write) {
3a7d929e 2907 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2908 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2909 if (p)
2910 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2911 /* XXX: could force cpu_single_env to NULL to avoid
2912 potential bugs */
13eb76e0 2913 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2914 /* 32 bit write access */
c27004ec 2915 val = ldl_p(buf);
a4193c8a 2916 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2917 l = 4;
2918 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2919 /* 16 bit write access */
c27004ec 2920 val = lduw_p(buf);
a4193c8a 2921 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2922 l = 2;
2923 } else {
1c213d19 2924 /* 8 bit write access */
c27004ec 2925 val = ldub_p(buf);
a4193c8a 2926 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2927 l = 1;
2928 }
2929 } else {
b448f2f3
FB
2930 unsigned long addr1;
2931 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2932 /* RAM case */
b448f2f3 2933 ptr = phys_ram_base + addr1;
13eb76e0 2934 memcpy(ptr, buf, l);
3a7d929e
FB
2935 if (!cpu_physical_memory_is_dirty(addr1)) {
2936 /* invalidate code */
2937 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2938 /* set dirty bit */
5fafdf24 2939 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2940 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2941 }
13eb76e0
FB
2942 }
2943 } else {
5fafdf24 2944 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 2945 !(pd & IO_MEM_ROMD)) {
13eb76e0
FB
2946 /* I/O case */
2947 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
2948 if (p)
2949 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
13eb76e0
FB
2950 if (l >= 4 && ((addr & 3) == 0)) {
2951 /* 32 bit read access */
a4193c8a 2952 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2953 stl_p(buf, val);
13eb76e0
FB
2954 l = 4;
2955 } else if (l >= 2 && ((addr & 1) == 0)) {
2956 /* 16 bit read access */
a4193c8a 2957 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2958 stw_p(buf, val);
13eb76e0
FB
2959 l = 2;
2960 } else {
1c213d19 2961 /* 8 bit read access */
a4193c8a 2962 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2963 stb_p(buf, val);
13eb76e0
FB
2964 l = 1;
2965 }
2966 } else {
2967 /* RAM case */
5fafdf24 2968 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
2969 (addr & ~TARGET_PAGE_MASK);
2970 memcpy(buf, ptr, l);
2971 }
2972 }
2973 len -= l;
2974 buf += l;
2975 addr += l;
2976 }
2977}
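/* Editor's illustrative sketch (not in the original exec.c): device models
   usually go through the cpu_physical_memory_read()/write() wrappers around
   this function when emulating DMA; the wrappers simply fix the is_write
   argument. */
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int len)
{
    uint8_t buf[64];
    int l;

    while (len > 0) {
        l = len < (int)sizeof(buf) ? len : (int)sizeof(buf);
        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}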
8df1cd07 2978
d0ecd2aa 2979/* used for ROM loading : can write in RAM and ROM */
5fafdf24 2980void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
2981 const uint8_t *buf, int len)
2982{
2983 int l;
2984 uint8_t *ptr;
2985 target_phys_addr_t page;
2986 unsigned long pd;
2987 PhysPageDesc *p;
3b46e624 2988
d0ecd2aa
FB
2989 while (len > 0) {
2990 page = addr & TARGET_PAGE_MASK;
2991 l = (page + TARGET_PAGE_SIZE) - addr;
2992 if (l > len)
2993 l = len;
2994 p = phys_page_find(page >> TARGET_PAGE_BITS);
2995 if (!p) {
2996 pd = IO_MEM_UNASSIGNED;
2997 } else {
2998 pd = p->phys_offset;
2999 }
3b46e624 3000
d0ecd2aa 3001 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3002 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3003 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3004 /* do nothing */
3005 } else {
3006 unsigned long addr1;
3007 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3008 /* ROM/RAM case */
3009 ptr = phys_ram_base + addr1;
3010 memcpy(ptr, buf, l);
3011 }
3012 len -= l;
3013 buf += l;
3014 addr += l;
3015 }
3016}
3017
3018
8df1cd07
FB
3019/* warning: addr must be aligned */
3020uint32_t ldl_phys(target_phys_addr_t addr)
3021{
3022 int io_index;
3023 uint8_t *ptr;
3024 uint32_t val;
3025 unsigned long pd;
3026 PhysPageDesc *p;
3027
3028 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3029 if (!p) {
3030 pd = IO_MEM_UNASSIGNED;
3031 } else {
3032 pd = p->phys_offset;
3033 }
3b46e624 3034
5fafdf24 3035 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3036 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3037 /* I/O case */
3038 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3039 if (p)
3040 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3041 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3042 } else {
3043 /* RAM case */
5fafdf24 3044 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3045 (addr & ~TARGET_PAGE_MASK);
3046 val = ldl_p(ptr);
3047 }
3048 return val;
3049}
3050
84b7b8e7
FB
3051/* warning: addr must be aligned */
3052uint64_t ldq_phys(target_phys_addr_t addr)
3053{
3054 int io_index;
3055 uint8_t *ptr;
3056 uint64_t val;
3057 unsigned long pd;
3058 PhysPageDesc *p;
3059
3060 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3061 if (!p) {
3062 pd = IO_MEM_UNASSIGNED;
3063 } else {
3064 pd = p->phys_offset;
3065 }
3b46e624 3066
2a4188a3
FB
3067 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3068 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3069 /* I/O case */
3070 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3071 if (p)
3072 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3073#ifdef TARGET_WORDS_BIGENDIAN
3074 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3075 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3076#else
3077 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3078 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3079#endif
3080 } else {
3081 /* RAM case */
5fafdf24 3082 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3083 (addr & ~TARGET_PAGE_MASK);
3084 val = ldq_p(ptr);
3085 }
3086 return val;
3087}
3088
aab33094
FB
3089/* XXX: optimize */
3090uint32_t ldub_phys(target_phys_addr_t addr)
3091{
3092 uint8_t val;
3093 cpu_physical_memory_read(addr, &val, 1);
3094 return val;
3095}
3096
3097/* XXX: optimize */
3098uint32_t lduw_phys(target_phys_addr_t addr)
3099{
3100 uint16_t val;
3101 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3102 return tswap16(val);
3103}
3104
8df1cd07
FB
3105/* warning: addr must be aligned. The ram page is not masked as dirty
3106 and the code inside is not invalidated. It is useful if the dirty
3107 bits are used to track modified PTEs */
3108void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3109{
3110 int io_index;
3111 uint8_t *ptr;
3112 unsigned long pd;
3113 PhysPageDesc *p;
3114
3115 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3116 if (!p) {
3117 pd = IO_MEM_UNASSIGNED;
3118 } else {
3119 pd = p->phys_offset;
3120 }
3b46e624 3121
3a7d929e 3122 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3123 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3124 if (p)
3125 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3126 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3127 } else {
74576198
AL
3128 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3129 ptr = phys_ram_base + addr1;
8df1cd07 3130 stl_p(ptr, val);
74576198
AL
3131
3132 if (unlikely(in_migration)) {
3133 if (!cpu_physical_memory_is_dirty(addr1)) {
3134 /* invalidate code */
3135 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3136 /* set dirty bit */
3137 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3138 (0xff & ~CODE_DIRTY_FLAG);
3139 }
3140 }
8df1cd07
FB
3141 }
3142}
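/* Editor's illustrative sketch (not in the original exec.c): target MMU code
   uses stl_phys_notdirty() when it sets accessed/dirty bits inside a guest
   page-table entry, so that QEMU's own bookkeeping write does not mark the
   PTE page dirty (those dirty bits are reserved for tracking pages the guest
   itself modified).  The 0x20 accessed bit is the x86 value, used here
   purely for illustration. */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20);
}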
3143
bc98a7ef
JM
3144void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3145{
3146 int io_index;
3147 uint8_t *ptr;
3148 unsigned long pd;
3149 PhysPageDesc *p;
3150
3151 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3152 if (!p) {
3153 pd = IO_MEM_UNASSIGNED;
3154 } else {
3155 pd = p->phys_offset;
3156 }
3b46e624 3157
bc98a7ef
JM
3158 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3159 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3160 if (p)
3161 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3162#ifdef TARGET_WORDS_BIGENDIAN
3163 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3164 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3165#else
3166 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3167 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3168#endif
3169 } else {
5fafdf24 3170 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3171 (addr & ~TARGET_PAGE_MASK);
3172 stq_p(ptr, val);
3173 }
3174}
3175
8df1cd07 3176/* warning: addr must be aligned */
8df1cd07
FB
3177void stl_phys(target_phys_addr_t addr, uint32_t val)
3178{
3179 int io_index;
3180 uint8_t *ptr;
3181 unsigned long pd;
3182 PhysPageDesc *p;
3183
3184 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3185 if (!p) {
3186 pd = IO_MEM_UNASSIGNED;
3187 } else {
3188 pd = p->phys_offset;
3189 }
3b46e624 3190
3a7d929e 3191 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3192 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3193 if (p)
3194 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3195 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3196 } else {
3197 unsigned long addr1;
3198 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3199 /* RAM case */
3200 ptr = phys_ram_base + addr1;
3201 stl_p(ptr, val);
3a7d929e
FB
3202 if (!cpu_physical_memory_is_dirty(addr1)) {
3203 /* invalidate code */
3204 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3205 /* set dirty bit */
f23db169
FB
3206 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3207 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3208 }
8df1cd07
FB
3209 }
3210}
3211
aab33094
FB
3212/* XXX: optimize */
3213void stb_phys(target_phys_addr_t addr, uint32_t val)
3214{
3215 uint8_t v = val;
3216 cpu_physical_memory_write(addr, &v, 1);
3217}
3218
3219/* XXX: optimize */
3220void stw_phys(target_phys_addr_t addr, uint32_t val)
3221{
3222 uint16_t v = tswap16(val);
3223 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3224}
3225
3226/* XXX: optimize */
3227void stq_phys(target_phys_addr_t addr, uint64_t val)
3228{
3229 val = tswap64(val);
3230 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3231}
3232
13eb76e0
FB
3233#endif
3234
3235/* virtual memory access for debug */
5fafdf24 3236int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3237 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3238{
3239 int l;
9b3c35e0
JM
3240 target_phys_addr_t phys_addr;
3241 target_ulong page;
13eb76e0
FB
3242
3243 while (len > 0) {
3244 page = addr & TARGET_PAGE_MASK;
3245 phys_addr = cpu_get_phys_page_debug(env, page);
3246 /* if no physical page mapped, return an error */
3247 if (phys_addr == -1)
3248 return -1;
3249 l = (page + TARGET_PAGE_SIZE) - addr;
3250 if (l > len)
3251 l = len;
5fafdf24 3252 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3253 buf, l, is_write);
13eb76e0
FB
3254 len -= l;
3255 buf += l;
3256 addr += l;
3257 }
3258 return 0;
3259}
3260
2e70f6ef
PB
3261/* in deterministic execution mode, instructions doing device I/Os
3262 must be at the end of the TB */
3263void cpu_io_recompile(CPUState *env, void *retaddr)
3264{
3265 TranslationBlock *tb;
3266 uint32_t n, cflags;
3267 target_ulong pc, cs_base;
3268 uint64_t flags;
3269
3270 tb = tb_find_pc((unsigned long)retaddr);
3271 if (!tb) {
3272 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3273 retaddr);
3274 }
3275 n = env->icount_decr.u16.low + tb->icount;
3276 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3277 /* Calculate how many instructions had been executed before the fault
bf20dc07 3278 occurred. */
2e70f6ef
PB
3279 n = n - env->icount_decr.u16.low;
3280 /* Generate a new TB ending on the I/O insn. */
3281 n++;
3282 /* On MIPS and SH, delay slot instructions can only be restarted if
3283 they were already the first instruction in the TB. If this is not
bf20dc07 3284 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3285 branch. */
3286#if defined(TARGET_MIPS)
3287 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3288 env->active_tc.PC -= 4;
3289 env->icount_decr.u16.low++;
3290 env->hflags &= ~MIPS_HFLAG_BMASK;
3291 }
3292#elif defined(TARGET_SH4)
3293 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3294 && n > 1) {
3295 env->pc -= 2;
3296 env->icount_decr.u16.low++;
3297 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3298 }
3299#endif
3300 /* This should never happen. */
3301 if (n > CF_COUNT_MASK)
3302 cpu_abort(env, "TB too big during recompile");
3303
3304 cflags = n | CF_LAST_IO;
3305 pc = tb->pc;
3306 cs_base = tb->cs_base;
3307 flags = tb->flags;
3308 tb_phys_invalidate(tb, -1);
3309 /* FIXME: In theory this could raise an exception. In practice
3310 we have already translated the block once so it's probably ok. */
3311 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3312 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3313 the first in the TB) then we end up generating a whole new TB and
3314 repeating the fault, which is horribly inefficient.
3315 Better would be to execute just this insn uncached, or generate a
3316 second new TB. */
3317 cpu_resume_from_signal(env, NULL);
3318}
3319
e3db7226
FB
3320void dump_exec_info(FILE *f,
3321 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3322{
3323 int i, target_code_size, max_target_code_size;
3324 int direct_jmp_count, direct_jmp2_count, cross_page;
3325 TranslationBlock *tb;
3b46e624 3326
e3db7226
FB
3327 target_code_size = 0;
3328 max_target_code_size = 0;
3329 cross_page = 0;
3330 direct_jmp_count = 0;
3331 direct_jmp2_count = 0;
3332 for(i = 0; i < nb_tbs; i++) {
3333 tb = &tbs[i];
3334 target_code_size += tb->size;
3335 if (tb->size > max_target_code_size)
3336 max_target_code_size = tb->size;
3337 if (tb->page_addr[1] != -1)
3338 cross_page++;
3339 if (tb->tb_next_offset[0] != 0xffff) {
3340 direct_jmp_count++;
3341 if (tb->tb_next_offset[1] != 0xffff) {
3342 direct_jmp2_count++;
3343 }
3344 }
3345 }
3346 /* XXX: avoid using doubles ? */
57fec1fe 3347 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3348 cpu_fprintf(f, "gen code size %ld/%ld\n",
3349 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3350 cpu_fprintf(f, "TB count %d/%d\n",
3351 nb_tbs, code_gen_max_blocks);
5fafdf24 3352 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3353 nb_tbs ? target_code_size / nb_tbs : 0,
3354 max_target_code_size);
5fafdf24 3355 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3356 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3357 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3358 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3359 cross_page,
e3db7226
FB
3360 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3361 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3362 direct_jmp_count,
e3db7226
FB
3363 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3364 direct_jmp2_count,
3365 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3366 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3367 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3368 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3369 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3370 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3371}
3372
5fafdf24 3373#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3374
3375#define MMUSUFFIX _cmmu
3376#define GETPC() NULL
3377#define env cpu_single_env
b769d8fe 3378#define SOFTMMU_CODE_ACCESS
61382a50
FB
3379
3380#define SHIFT 0
3381#include "softmmu_template.h"
3382
3383#define SHIFT 1
3384#include "softmmu_template.h"
3385
3386#define SHIFT 2
3387#include "softmmu_template.h"
3388
3389#define SHIFT 3
3390#include "softmmu_template.h"
3391
3392#undef env
3393
3394#endif