/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

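/* Illustration (explanatory note, not part of the original source):
   with the usual TARGET_PAGE_BITS of 12 (4 KB pages), the default
   case above gives L1_BITS = 32 - 10 - 12 = 10, so both levels hold
   1024 entries. A page index is then split as:
       index = addr >> TARGET_PAGE_BITS;
       l1_map[index >> L2_BITS]       -> chunk of L2_SIZE PageDescs
       chunk[index & (L2_SIZE - 1)]   -> PageDesc for the page
   which is exactly the lookup performed by page_find() below. */
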
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
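
/* Worked example (illustrative only): with a 4096-byte host page
   size, map_exec(addr = (void *)0x401234, size = 0x100) rounds the
   start down to 0x401000 and the end up to 0x402000, so every host
   page overlapping the buffer becomes PROT_READ|PROT_WRITE|PROT_EXEC. */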

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf(f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
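
/* Note on the pointer tagging used here and below (explanatory,
   not in the original): the two low bits of the pointers stored in
   first_tb/page_next and jmp_first/jmp_next carry extra state. For
   page lists, n = (long)ptr & 3 selects which of the TB's two pages
   (page_addr[0] or page_addr[1]) the link belongs to. For jump
   lists, n = 0 or 1 selects the jump slot, and n == 2 marks the
   list head (e.g. tb->jmp_first = (TranslationBlock *)((long)tb | 2)).
   Masking with ~3 recovers the real TranslationBlock pointer. */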

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
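
/* Worked example (illustrative only): set_bits(tab, 3, 7) must set
   bits 3..9. start and end fall in different bytes, so the first
   byte gets mask 0xff << 3 = 0xf8 (bits 3..7), no full middle byte
   is written, and the second byte gets ~(0xff << 2) = 0x03
   (bits 8..9, i.e. bits 0..1 of that byte). */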

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
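
/* Why returning &tbs[m_max] is correct (explanatory note, not in the
   original): TBs are carved out of code_gen_buffer in allocation
   order, so the tc_ptr values in tbs[] are strictly increasing. When
   the loop exits without an exact match, m_max indexes the last block
   whose tc_ptr lies below the searched address, i.e. the block whose
   generated code contains tc_ptr. */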

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB. It can be removed once the proper IO trap and
       re-execute bits are in. */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
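
/* Usage example (illustrative only): given the cpu_log_items table
   above, cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every mask, and
   any unknown name makes the whole call return 0. */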

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
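
/* Explanatory note (not in the original): the comparisons above only
   match when the entry really maps 'addr' for that access type.
   Invalid entries hold -1, so keeping TLB_INVALID_MASK in the
   comparison leaves that bit set in the masked value, which can never
   equal the page-aligned addr; stale or empty entries are therefore
   left untouched. */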

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines. */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code. */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (te->addr_code != -1) {
            tlb_flush_jmp_cache(env, te->addr_code);
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
1814
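/* Illustrative sketch (not part of the original file): how the entry filled
   in above is consumed on the softmmu fast path.  te->addend was biased by
   -vaddr, so on a hit the host pointer is simply the guest address plus the
   addend.  This is simplified: the real generated fast path also folds the
   access alignment bits into the comparison, so unaligned and I/O accesses
   (whose addr_read keeps the io index in its low bits) fall through to the
   slow path.  example_tlb_hit_read is a hypothetical name. */
static inline uint8_t *example_tlb_hit_read(CPUState *env1, target_ulong addr,
                                            int mmu_idx)
{
    unsigned int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx][index];

    if (e->addr_read != (addr & TARGET_PAGE_MASK))
        return NULL;  /* miss, I/O page or watchpoint: take the slow path */
    /* RAM hit: translate directly to a host pointer */
    return (uint8_t *)(unsigned long)(addr + e->addend);
}
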
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page holds translated code and is being made
           writable, we must invalidate the code inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}

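/* Illustrative sketch (not in the original source): a typical user-mode
   caller validates a guest buffer with page_check_range() before touching
   it, e.g. when emulating a syscall that reads from guest memory.  The
   helper name and the direct memcpy are hypothetical; real callers go
   through lock_user()/unlock_user(). */
static int example_copy_from_guest(void *dst, target_ulong guest_src,
                                   target_ulong len)
{
    if (page_check_range(guest_src, len, PAGE_READ) < 0)
        return -1;                     /* access would fault in the guest */
    memcpy(dst, g2h(guest_src), len);  /* g2h(): guest -> host address */
    return 0;
}
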
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

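/* Worked example (illustrative, assuming TARGET_PAGE_SIZE == 0x1000):
   registering start_addr = 0x1200 with orig_size = 0x400 visits the single
   page at 0x1000 with addr == start_addr.  Then start_addr2 = 0x1200 &
   ~TARGET_PAGE_MASK = 0x200, and since start_addr + orig_size - addr =
   0x400 < TARGET_PAGE_SIZE, end_addr2 = 0x15ff & ~TARGET_PAGE_MASK = 0x5ff.
   Both bounds are intra-page, so need_subpage is set and only bytes
   0x200..0x5ff of that page are routed to the new region. */
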
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %lu)\n",
                (unsigned long)size, (unsigned long)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

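/* Illustrative sketch (not part of exec.c): how a board model typically
   pairs qemu_ram_alloc() with cpu_register_physical_memory() to back a
   guest-physical range with host RAM.  example_init_board_ram and its
   parameters are hypothetical. */
static void example_init_board_ram(target_phys_addr_t base, ram_addr_t size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(size);

    /* IO_MEM_RAM in the low bits marks the range as ordinary RAM */
    cpu_register_physical_memory(base, size, ram_offset | IO_MEM_RAM);
}
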
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC)
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

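/* Illustrative sketch (not in the original file): how a consumer such as a
   framebuffer model uses the dirty bitmap that the notdirty handlers above
   maintain.  cpu_physical_memory_get_dirty()/reset_dirty() and
   VGA_DIRTY_FLAG come from cpu-all.h; the helper itself is hypothetical. */
static void example_refresh_framebuffer(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t a;

    for (a = fb_base; a < fb_base + fb_size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    /* clear only the VGA bit; CODE_DIRTY_FLAG bookkeeping is untouched */
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}
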
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access. addr will be a host
   address in case of a RAM location. */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

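/* Illustrative sketch (not in the original file): the gdb stub arms the
   handlers above by registering a watchpoint; tlb_set_page_exec() then
   redirects the page's TLB entry to io_mem_watch.  The wrapper is
   hypothetical and assumes the cpu_watchpoint_insert() entry point of this
   era (declared in cpu-all.h). */
static int example_watch_address(CPUState *env1, target_ulong guest_addr)
{
    return cpu_watchpoint_insert(env1, guest_addr);   /* 0 on success */
}
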
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

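/* Illustrative sketch (not part of exec.c): registering a trivial
   memory-mapped device.  The handler and device names are hypothetical;
   the pattern (cpu_register_io_memory() followed by
   cpu_register_physical_memory()) is the one used by the hw/ device
   models.  Note that the NULL byte/word slots make cpu_register_io_memory()
   flag the region IO_MEM_SUBWIDTH, per the loop above. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                   /* pretend register contents */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* latch val into the device state pointed to by opaque ... */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,       /* 32-bit access only */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_map(target_phys_addr_t base, void *dev_state)
{
    int io = cpu_register_io_memory(0, example_dev_read, example_dev_write,
                                    dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
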
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

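/* Illustrative sketch (not in the original file): the convenience macros
   cpu_physical_memory_read()/write() from cpu-all.h are thin wrappers
   around cpu_physical_memory_rw(); a DMA-capable device model would copy
   a descriptor out of guest RAM like this.  example_dma_fetch is a
   hypothetical helper. */
static void example_dma_fetch(target_phys_addr_t desc_addr,
                              uint8_t *desc, int desc_len)
{
    cpu_physical_memory_read(desc_addr, desc, desc_len);
}
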
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

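/* Illustrative sketch (not part of exec.c): target MMU code uses
   stl_phys_notdirty() when setting accessed/dirty bits in a guest page
   table entry, so that the PTE write does not itself mark the page dirty
   (the dirty bitmap may be tracking PTE modifications, as the comment
   above notes).  The bit mask is a hypothetical stand-in for a
   target-specific constant such as x86's PG_ACCESSED_MASK. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
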
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

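/* Illustrative sketch (not in the original file): the gdb stub reads guest
   virtual memory through cpu_memory_rw_debug(), which performs the
   page-table walk via cpu_get_phys_page_debug() above.  example_peek_u32
   is a hypothetical helper. */
static int example_peek_u32(CPUState *env1, target_ulong vaddr1, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env1, vaddr1, buf, 4, 0) < 0)
        return -1;                /* unmapped in the guest page tables */
    *out = ldl_p(buf);            /* target-endian load from the buffer */
    return 0;
}
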
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

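/* Each inclusion below instantiates the template once per access size:
   SHIFT selects 1 << SHIFT bytes, and the MMUSUFFIX/SOFTMMU_CODE_ACCESS
   definitions above make it emit the _cmmu variants used for code fetch
   rather than the normal data-access helpers. */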
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif