]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
Fix typo in previous patch.
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c
FB
21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c
FB
25#include <sys/mman.h>
26#endif
54936004
FB
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181
FB
35#include "cpu.h"
36#include "exec-all.h"
53a5960a
PB
37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
fd6ce8f6
FB
44
45/* make various TB consistency checks */
46//#define DEBUG_TB_CHECK
98857888 47//#define DEBUG_TLB_CHECK
fd6ce8f6
FB
48
49/* threshold to flush the translated code buffer */
50#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
51
9fa3e853
FB
52#define SMC_BITMAP_USE_THRESHOLD 10
53
54#define MMAP_AREA_START 0x00000000
55#define MMAP_AREA_END 0xa8000000
fd6ce8f6 56
108c49b8
FB
57#if defined(TARGET_SPARC64)
58#define TARGET_PHYS_ADDR_SPACE_BITS 41
59#elif defined(TARGET_PPC64)
60#define TARGET_PHYS_ADDR_SPACE_BITS 42
61#else
62/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63#define TARGET_PHYS_ADDR_SPACE_BITS 32
64#endif
65
fd6ce8f6 66TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 67TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 68int nb_tbs;
eb51d102
FB
69/* any access to the tbs or the page table must use this lock */
70spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 71
b8076a74 72uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
73uint8_t *code_gen_ptr;
74
9fa3e853
FB
75int phys_ram_size;
76int phys_ram_fd;
77uint8_t *phys_ram_base;
1ccde1cb 78uint8_t *phys_ram_dirty;
9fa3e853 79
6a00d601
FB
80CPUState *first_cpu;
81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
83CPUState *cpu_single_env;
84
54936004 85typedef struct PageDesc {
92e873b9 86 /* list of TBs intersecting this ram page */
fd6ce8f6 87 TranslationBlock *first_tb;
9fa3e853
FB
88 /* in order to optimize self modifying code, we count the number
89 of lookups we do to a given page to use a bitmap */
90 unsigned int code_write_count;
91 uint8_t *code_bitmap;
92#if defined(CONFIG_USER_ONLY)
93 unsigned long flags;
94#endif
54936004
FB
95} PageDesc;
96
92e873b9
FB
97typedef struct PhysPageDesc {
98 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 99 uint32_t phys_offset;
92e873b9
FB
100} PhysPageDesc;
101
54936004
FB
102#define L2_BITS 10
103#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
104
105#define L1_SIZE (1 << L1_BITS)
106#define L2_SIZE (1 << L2_BITS)
107
33417e70 108static void io_mem_init(void);
fd6ce8f6 109
83fb7adf
FB
110unsigned long qemu_real_host_page_size;
111unsigned long qemu_host_page_bits;
112unsigned long qemu_host_page_size;
113unsigned long qemu_host_page_mask;
54936004 114
92e873b9 115/* XXX: for system emulation, it could just be an array */
54936004 116static PageDesc *l1_map[L1_SIZE];
0a962c02 117PhysPageDesc **l1_phys_map;
54936004 118
33417e70 119/* io memory support */
33417e70
FB
120CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 122void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70
FB
123static int io_mem_nb;
124
34865134
FB
125/* log support */
126char *logfilename = "/tmp/qemu.log";
127FILE *logfile;
128int loglevel;
129
e3db7226
FB
130/* statistics */
131static int tlb_flush_count;
132static int tb_flush_count;
133static int tb_phys_invalidate_count;
134
b346ff46 135static void page_init(void)
54936004 136{
83fb7adf 137 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 138 TARGET_PAGE_SIZE */
67b915a5 139#ifdef _WIN32
d5a8f07c
FB
140 {
141 SYSTEM_INFO system_info;
142 DWORD old_protect;
143
144 GetSystemInfo(&system_info);
145 qemu_real_host_page_size = system_info.dwPageSize;
146
147 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
148 PAGE_EXECUTE_READWRITE, &old_protect);
149 }
67b915a5 150#else
83fb7adf 151 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
152 {
153 unsigned long start, end;
154
155 start = (unsigned long)code_gen_buffer;
156 start &= ~(qemu_real_host_page_size - 1);
157
158 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
159 end += qemu_real_host_page_size - 1;
160 end &= ~(qemu_real_host_page_size - 1);
161
162 mprotect((void *)start, end - start,
163 PROT_READ | PROT_WRITE | PROT_EXEC);
164 }
67b915a5 165#endif
d5a8f07c 166
83fb7adf
FB
167 if (qemu_host_page_size == 0)
168 qemu_host_page_size = qemu_real_host_page_size;
169 if (qemu_host_page_size < TARGET_PAGE_SIZE)
170 qemu_host_page_size = TARGET_PAGE_SIZE;
171 qemu_host_page_bits = 0;
172 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
173 qemu_host_page_bits++;
174 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
175 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
176 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004
FB
177}
178
fd6ce8f6 179static inline PageDesc *page_find_alloc(unsigned int index)
54936004 180{
54936004
FB
181 PageDesc **lp, *p;
182
54936004
FB
183 lp = &l1_map[index >> L2_BITS];
184 p = *lp;
185 if (!p) {
186 /* allocate if not found */
59817ccb 187 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 188 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
189 *lp = p;
190 }
191 return p + (index & (L2_SIZE - 1));
192}
193
fd6ce8f6 194static inline PageDesc *page_find(unsigned int index)
54936004 195{
54936004
FB
196 PageDesc *p;
197
54936004
FB
198 p = l1_map[index >> L2_BITS];
199 if (!p)
200 return 0;
fd6ce8f6
FB
201 return p + (index & (L2_SIZE - 1));
202}
203
108c49b8 204static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 205{
108c49b8 206 void **lp, **p;
92e873b9 207
108c49b8
FB
208 p = (void **)l1_phys_map;
209#if TARGET_PHYS_ADDR_SPACE_BITS > 32
210
211#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
212#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
213#endif
214 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
215 p = *lp;
216 if (!p) {
217 /* allocate if not found */
108c49b8
FB
218 if (!alloc)
219 return NULL;
220 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
221 memset(p, 0, sizeof(void *) * L1_SIZE);
222 *lp = p;
223 }
224#endif
225 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
226 p = *lp;
227 if (!p) {
228 /* allocate if not found */
229 if (!alloc)
230 return NULL;
0a962c02 231 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
92e873b9
FB
232 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
233 *lp = p;
234 }
108c49b8 235 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
92e873b9
FB
236}
237
108c49b8 238static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 239{
108c49b8 240 return phys_page_find_alloc(index, 0);
92e873b9
FB
241}
242
9fa3e853 243#if !defined(CONFIG_USER_ONLY)
6a00d601 244static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
245static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
246 target_ulong vaddr);
9fa3e853 247#endif
fd6ce8f6 248
6a00d601 249void cpu_exec_init(CPUState *env)
fd6ce8f6 250{
6a00d601
FB
251 CPUState **penv;
252 int cpu_index;
253
fd6ce8f6
FB
254 if (!code_gen_ptr) {
255 code_gen_ptr = code_gen_buffer;
b346ff46 256 page_init();
33417e70 257 io_mem_init();
fd6ce8f6 258 }
6a00d601
FB
259 env->next_cpu = NULL;
260 penv = &first_cpu;
261 cpu_index = 0;
262 while (*penv != NULL) {
263 penv = (CPUState **)&(*penv)->next_cpu;
264 cpu_index++;
265 }
266 env->cpu_index = cpu_index;
267 *penv = env;
fd6ce8f6
FB
268}
269
9fa3e853
FB
270static inline void invalidate_page_bitmap(PageDesc *p)
271{
272 if (p->code_bitmap) {
59817ccb 273 qemu_free(p->code_bitmap);
9fa3e853
FB
274 p->code_bitmap = NULL;
275 }
276 p->code_write_count = 0;
277}
278
fd6ce8f6
FB
279/* set to NULL all the 'first_tb' fields in all PageDescs */
280static void page_flush_tb(void)
281{
282 int i, j;
283 PageDesc *p;
284
285 for(i = 0; i < L1_SIZE; i++) {
286 p = l1_map[i];
287 if (p) {
9fa3e853
FB
288 for(j = 0; j < L2_SIZE; j++) {
289 p->first_tb = NULL;
290 invalidate_page_bitmap(p);
291 p++;
292 }
fd6ce8f6
FB
293 }
294 }
295}
296
297/* flush all the translation blocks */
d4e8164f 298/* XXX: tb_flush is currently not thread safe */
6a00d601 299void tb_flush(CPUState *env1)
fd6ce8f6 300{
6a00d601 301 CPUState *env;
0124311e 302#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
303 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
304 code_gen_ptr - code_gen_buffer,
305 nb_tbs,
0124311e 306 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
307#endif
308 nb_tbs = 0;
6a00d601
FB
309
310 for(env = first_cpu; env != NULL; env = env->next_cpu) {
311 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
312 }
9fa3e853 313
8a8a608f 314 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 315 page_flush_tb();
9fa3e853 316
fd6ce8f6 317 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
318 /* XXX: flush processor icache at this point if cache flush is
319 expensive */
e3db7226 320 tb_flush_count++;
fd6ce8f6
FB
321}
322
323#ifdef DEBUG_TB_CHECK
324
325static void tb_invalidate_check(unsigned long address)
326{
327 TranslationBlock *tb;
328 int i;
329 address &= TARGET_PAGE_MASK;
330 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
331 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
332 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
333 address >= tb->pc + tb->size)) {
334 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
335 address, tb->pc, tb->size);
336 }
337 }
338 }
339}
340
341/* verify that all the pages have correct rights for code */
342static void tb_page_check(void)
343{
344 TranslationBlock *tb;
345 int i, flags1, flags2;
346
347 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
348 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
349 flags1 = page_get_flags(tb->pc);
350 flags2 = page_get_flags(tb->pc + tb->size - 1);
351 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
352 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
353 tb->pc, tb->size, flags1, flags2);
354 }
355 }
356 }
357}
358
d4e8164f
FB
359void tb_jmp_check(TranslationBlock *tb)
360{
361 TranslationBlock *tb1;
362 unsigned int n1;
363
364 /* suppress any remaining jumps to this TB */
365 tb1 = tb->jmp_first;
366 for(;;) {
367 n1 = (long)tb1 & 3;
368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
369 if (n1 == 2)
370 break;
371 tb1 = tb1->jmp_next[n1];
372 }
373 /* check end of list */
374 if (tb1 != tb) {
375 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
376 }
377}
378
fd6ce8f6
FB
379#endif
380
381/* invalidate one TB */
382static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
383 int next_offset)
384{
385 TranslationBlock *tb1;
386 for(;;) {
387 tb1 = *ptb;
388 if (tb1 == tb) {
389 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
390 break;
391 }
392 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
393 }
394}
395
9fa3e853
FB
396static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
397{
398 TranslationBlock *tb1;
399 unsigned int n1;
400
401 for(;;) {
402 tb1 = *ptb;
403 n1 = (long)tb1 & 3;
404 tb1 = (TranslationBlock *)((long)tb1 & ~3);
405 if (tb1 == tb) {
406 *ptb = tb1->page_next[n1];
407 break;
408 }
409 ptb = &tb1->page_next[n1];
410 }
411}
412
d4e8164f
FB
413static inline void tb_jmp_remove(TranslationBlock *tb, int n)
414{
415 TranslationBlock *tb1, **ptb;
416 unsigned int n1;
417
418 ptb = &tb->jmp_next[n];
419 tb1 = *ptb;
420 if (tb1) {
421 /* find tb(n) in circular list */
422 for(;;) {
423 tb1 = *ptb;
424 n1 = (long)tb1 & 3;
425 tb1 = (TranslationBlock *)((long)tb1 & ~3);
426 if (n1 == n && tb1 == tb)
427 break;
428 if (n1 == 2) {
429 ptb = &tb1->jmp_first;
430 } else {
431 ptb = &tb1->jmp_next[n1];
432 }
433 }
434 /* now we can suppress tb(n) from the list */
435 *ptb = tb->jmp_next[n];
436
437 tb->jmp_next[n] = NULL;
438 }
439}
440
441/* reset the jump entry 'n' of a TB so that it is not chained to
442 another TB */
443static inline void tb_reset_jump(TranslationBlock *tb, int n)
444{
445 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
446}
447
8a40a180 448static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 449{
6a00d601 450 CPUState *env;
8a40a180 451 PageDesc *p;
d4e8164f 452 unsigned int h, n1;
8a40a180
FB
453 target_ulong phys_pc;
454 TranslationBlock *tb1, *tb2;
d4e8164f 455
8a40a180
FB
456 /* remove the TB from the hash list */
457 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
458 h = tb_phys_hash_func(phys_pc);
459 tb_remove(&tb_phys_hash[h], tb,
460 offsetof(TranslationBlock, phys_hash_next));
461
462 /* remove the TB from the page list */
463 if (tb->page_addr[0] != page_addr) {
464 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
469 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
470 tb_page_remove(&p->first_tb, tb);
471 invalidate_page_bitmap(p);
472 }
473
36bdbe54 474 tb_invalidated_flag = 1;
59817ccb 475
fd6ce8f6 476 /* remove the TB from the hash list */
8a40a180 477 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
478 for(env = first_cpu; env != NULL; env = env->next_cpu) {
479 if (env->tb_jmp_cache[h] == tb)
480 env->tb_jmp_cache[h] = NULL;
481 }
d4e8164f
FB
482
483 /* suppress this TB from the two jump lists */
484 tb_jmp_remove(tb, 0);
485 tb_jmp_remove(tb, 1);
486
487 /* suppress any remaining jumps to this TB */
488 tb1 = tb->jmp_first;
489 for(;;) {
490 n1 = (long)tb1 & 3;
491 if (n1 == 2)
492 break;
493 tb1 = (TranslationBlock *)((long)tb1 & ~3);
494 tb2 = tb1->jmp_next[n1];
495 tb_reset_jump(tb1, n1);
496 tb1->jmp_next[n1] = NULL;
497 tb1 = tb2;
498 }
499 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 500
e3db7226 501 tb_phys_invalidate_count++;
9fa3e853
FB
502}
503
504static inline void set_bits(uint8_t *tab, int start, int len)
505{
506 int end, mask, end1;
507
508 end = start + len;
509 tab += start >> 3;
510 mask = 0xff << (start & 7);
511 if ((start & ~7) == (end & ~7)) {
512 if (start < end) {
513 mask &= ~(0xff << (end & 7));
514 *tab |= mask;
515 }
516 } else {
517 *tab++ |= mask;
518 start = (start + 8) & ~7;
519 end1 = end & ~7;
520 while (start < end1) {
521 *tab++ = 0xff;
522 start += 8;
523 }
524 if (start < end) {
525 mask = ~(0xff << (end & 7));
526 *tab |= mask;
527 }
528 }
529}
530
531static void build_page_bitmap(PageDesc *p)
532{
533 int n, tb_start, tb_end;
534 TranslationBlock *tb;
535
59817ccb 536 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
537 if (!p->code_bitmap)
538 return;
539 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
540
541 tb = p->first_tb;
542 while (tb != NULL) {
543 n = (long)tb & 3;
544 tb = (TranslationBlock *)((long)tb & ~3);
545 /* NOTE: this is subtle as a TB may span two physical pages */
546 if (n == 0) {
547 /* NOTE: tb_end may be after the end of the page, but
548 it is not a problem */
549 tb_start = tb->pc & ~TARGET_PAGE_MASK;
550 tb_end = tb_start + tb->size;
551 if (tb_end > TARGET_PAGE_SIZE)
552 tb_end = TARGET_PAGE_SIZE;
553 } else {
554 tb_start = 0;
555 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
556 }
557 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
558 tb = tb->page_next[n];
559 }
560}
561
d720b93d
FB
562#ifdef TARGET_HAS_PRECISE_SMC
563
564static void tb_gen_code(CPUState *env,
565 target_ulong pc, target_ulong cs_base, int flags,
566 int cflags)
567{
568 TranslationBlock *tb;
569 uint8_t *tc_ptr;
570 target_ulong phys_pc, phys_page2, virt_page2;
571 int code_gen_size;
572
c27004ec
FB
573 phys_pc = get_phys_addr_code(env, pc);
574 tb = tb_alloc(pc);
d720b93d
FB
575 if (!tb) {
576 /* flush must be done */
577 tb_flush(env);
578 /* cannot fail at this point */
c27004ec 579 tb = tb_alloc(pc);
d720b93d
FB
580 }
581 tc_ptr = code_gen_ptr;
582 tb->tc_ptr = tc_ptr;
583 tb->cs_base = cs_base;
584 tb->flags = flags;
585 tb->cflags = cflags;
586 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
587 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
588
589 /* check next page if needed */
c27004ec 590 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 591 phys_page2 = -1;
c27004ec 592 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
593 phys_page2 = get_phys_addr_code(env, virt_page2);
594 }
595 tb_link_phys(tb, phys_pc, phys_page2);
596}
597#endif
598
9fa3e853
FB
599/* invalidate all TBs which intersect with the target physical page
600 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
601 the same physical page. 'is_cpu_write_access' should be true if called
602 from a real cpu write access: the virtual CPU will exit the current
603 TB if code is modified inside this TB. */
604void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
605 int is_cpu_write_access)
606{
607 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 608 CPUState *env = cpu_single_env;
9fa3e853 609 PageDesc *p;
ea1c1802 610 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 611 target_ulong tb_start, tb_end;
d720b93d 612 target_ulong current_pc, current_cs_base;
9fa3e853
FB
613
614 p = page_find(start >> TARGET_PAGE_BITS);
615 if (!p)
616 return;
617 if (!p->code_bitmap &&
d720b93d
FB
618 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
619 is_cpu_write_access) {
9fa3e853
FB
620 /* build code bitmap */
621 build_page_bitmap(p);
622 }
623
624 /* we remove all the TBs in the range [start, end[ */
625 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
626 current_tb_not_found = is_cpu_write_access;
627 current_tb_modified = 0;
628 current_tb = NULL; /* avoid warning */
629 current_pc = 0; /* avoid warning */
630 current_cs_base = 0; /* avoid warning */
631 current_flags = 0; /* avoid warning */
9fa3e853
FB
632 tb = p->first_tb;
633 while (tb != NULL) {
634 n = (long)tb & 3;
635 tb = (TranslationBlock *)((long)tb & ~3);
636 tb_next = tb->page_next[n];
637 /* NOTE: this is subtle as a TB may span two physical pages */
638 if (n == 0) {
639 /* NOTE: tb_end may be after the end of the page, but
640 it is not a problem */
641 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
642 tb_end = tb_start + tb->size;
643 } else {
644 tb_start = tb->page_addr[1];
645 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
646 }
647 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
648#ifdef TARGET_HAS_PRECISE_SMC
649 if (current_tb_not_found) {
650 current_tb_not_found = 0;
651 current_tb = NULL;
652 if (env->mem_write_pc) {
653 /* now we have a real cpu fault */
654 current_tb = tb_find_pc(env->mem_write_pc);
655 }
656 }
657 if (current_tb == tb &&
658 !(current_tb->cflags & CF_SINGLE_INSN)) {
659 /* If we are modifying the current TB, we must stop
660 its execution. We could be more precise by checking
661 that the modification is after the current PC, but it
662 would require a specialized function to partially
663 restore the CPU state */
664
665 current_tb_modified = 1;
666 cpu_restore_state(current_tb, env,
667 env->mem_write_pc, NULL);
668#if defined(TARGET_I386)
669 current_flags = env->hflags;
670 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
671 current_cs_base = (target_ulong)env->segs[R_CS].base;
672 current_pc = current_cs_base + env->eip;
673#else
674#error unsupported CPU
675#endif
676 }
677#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
678 /* we need to do that to handle the case where a signal
679 occurs while doing tb_phys_invalidate() */
680 saved_tb = NULL;
681 if (env) {
682 saved_tb = env->current_tb;
683 env->current_tb = NULL;
684 }
9fa3e853 685 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
686 if (env) {
687 env->current_tb = saved_tb;
688 if (env->interrupt_request && env->current_tb)
689 cpu_interrupt(env, env->interrupt_request);
690 }
9fa3e853
FB
691 }
692 tb = tb_next;
693 }
694#if !defined(CONFIG_USER_ONLY)
695 /* if no code remaining, no need to continue to use slow writes */
696 if (!p->first_tb) {
697 invalidate_page_bitmap(p);
d720b93d
FB
698 if (is_cpu_write_access) {
699 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
700 }
701 }
702#endif
703#ifdef TARGET_HAS_PRECISE_SMC
704 if (current_tb_modified) {
705 /* we generate a block containing just the instruction
706 modifying the memory. It will ensure that it cannot modify
707 itself */
ea1c1802 708 env->current_tb = NULL;
d720b93d
FB
709 tb_gen_code(env, current_pc, current_cs_base, current_flags,
710 CF_SINGLE_INSN);
711 cpu_resume_from_signal(env, NULL);
9fa3e853 712 }
fd6ce8f6 713#endif
9fa3e853 714}
fd6ce8f6 715
9fa3e853 716/* len must be <= 8 and start must be a multiple of len */
d720b93d 717static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
718{
719 PageDesc *p;
720 int offset, b;
59817ccb 721#if 0
a4193c8a
FB
722 if (1) {
723 if (loglevel) {
724 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
725 cpu_single_env->mem_write_vaddr, len,
726 cpu_single_env->eip,
727 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
728 }
59817ccb
FB
729 }
730#endif
9fa3e853
FB
731 p = page_find(start >> TARGET_PAGE_BITS);
732 if (!p)
733 return;
734 if (p->code_bitmap) {
735 offset = start & ~TARGET_PAGE_MASK;
736 b = p->code_bitmap[offset >> 3] >> (offset & 7);
737 if (b & ((1 << len) - 1))
738 goto do_invalidate;
739 } else {
740 do_invalidate:
d720b93d 741 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
742 }
743}
744
9fa3e853 745#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
746static void tb_invalidate_phys_page(target_ulong addr,
747 unsigned long pc, void *puc)
9fa3e853 748{
d720b93d
FB
749 int n, current_flags, current_tb_modified;
750 target_ulong current_pc, current_cs_base;
9fa3e853 751 PageDesc *p;
d720b93d
FB
752 TranslationBlock *tb, *current_tb;
753#ifdef TARGET_HAS_PRECISE_SMC
754 CPUState *env = cpu_single_env;
755#endif
9fa3e853
FB
756
757 addr &= TARGET_PAGE_MASK;
758 p = page_find(addr >> TARGET_PAGE_BITS);
759 if (!p)
760 return;
761 tb = p->first_tb;
d720b93d
FB
762 current_tb_modified = 0;
763 current_tb = NULL;
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
767#ifdef TARGET_HAS_PRECISE_SMC
768 if (tb && pc != 0) {
769 current_tb = tb_find_pc(pc);
770 }
771#endif
9fa3e853
FB
772 while (tb != NULL) {
773 n = (long)tb & 3;
774 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
775#ifdef TARGET_HAS_PRECISE_SMC
776 if (current_tb == tb &&
777 !(current_tb->cflags & CF_SINGLE_INSN)) {
778 /* If we are modifying the current TB, we must stop
779 its execution. We could be more precise by checking
780 that the modification is after the current PC, but it
781 would require a specialized function to partially
782 restore the CPU state */
783
784 current_tb_modified = 1;
785 cpu_restore_state(current_tb, env, pc, puc);
786#if defined(TARGET_I386)
787 current_flags = env->hflags;
788 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
789 current_cs_base = (target_ulong)env->segs[R_CS].base;
790 current_pc = current_cs_base + env->eip;
791#else
792#error unsupported CPU
793#endif
794 }
795#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
796 tb_phys_invalidate(tb, addr);
797 tb = tb->page_next[n];
798 }
fd6ce8f6 799 p->first_tb = NULL;
d720b93d
FB
800#ifdef TARGET_HAS_PRECISE_SMC
801 if (current_tb_modified) {
802 /* we generate a block containing just the instruction
803 modifying the memory. It will ensure that it cannot modify
804 itself */
ea1c1802 805 env->current_tb = NULL;
d720b93d
FB
806 tb_gen_code(env, current_pc, current_cs_base, current_flags,
807 CF_SINGLE_INSN);
808 cpu_resume_from_signal(env, puc);
809 }
810#endif
fd6ce8f6 811}
9fa3e853 812#endif
fd6ce8f6
FB
813
814/* add the tb in the target page and protect it if necessary */
9fa3e853 815static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 816 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
817{
818 PageDesc *p;
9fa3e853
FB
819 TranslationBlock *last_first_tb;
820
821 tb->page_addr[n] = page_addr;
3a7d929e 822 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
823 tb->page_next[n] = p->first_tb;
824 last_first_tb = p->first_tb;
825 p->first_tb = (TranslationBlock *)((long)tb | n);
826 invalidate_page_bitmap(p);
fd6ce8f6 827
107db443 828#if defined(TARGET_HAS_SMC) || 1
d720b93d 829
9fa3e853 830#if defined(CONFIG_USER_ONLY)
fd6ce8f6 831 if (p->flags & PAGE_WRITE) {
53a5960a
PB
832 target_ulong addr;
833 PageDesc *p2;
9fa3e853
FB
834 int prot;
835
fd6ce8f6
FB
836 /* force the host page as non writable (writes will have a
837 page fault + mprotect overhead) */
53a5960a 838 page_addr &= qemu_host_page_mask;
fd6ce8f6 839 prot = 0;
53a5960a
PB
840 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
841 addr += TARGET_PAGE_SIZE) {
842
843 p2 = page_find (addr >> TARGET_PAGE_BITS);
844 if (!p2)
845 continue;
846 prot |= p2->flags;
847 p2->flags &= ~PAGE_WRITE;
848 page_get_flags(addr);
849 }
850 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
851 (prot & PAGE_BITS) & ~PAGE_WRITE);
852#ifdef DEBUG_TB_INVALIDATE
853 printf("protecting code page: 0x%08lx\n",
53a5960a 854 page_addr);
fd6ce8f6 855#endif
fd6ce8f6 856 }
9fa3e853
FB
857#else
858 /* if some code is already present, then the pages are already
859 protected. So we handle the case where only the first TB is
860 allocated in a physical page */
861 if (!last_first_tb) {
6a00d601 862 tlb_protect_code(page_addr);
9fa3e853
FB
863 }
864#endif
d720b93d
FB
865
866#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
867}
868
869/* Allocate a new translation block. Flush the translation buffer if
870 too many translation blocks or too much generated code. */
c27004ec 871TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
872{
873 TranslationBlock *tb;
fd6ce8f6
FB
874
875 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
876 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 877 return NULL;
fd6ce8f6
FB
878 tb = &tbs[nb_tbs++];
879 tb->pc = pc;
b448f2f3 880 tb->cflags = 0;
d4e8164f
FB
881 return tb;
882}
883
9fa3e853
FB
884/* add a new TB and link it to the physical page tables. phys_page2 is
885 (-1) to indicate that only one page contains the TB. */
886void tb_link_phys(TranslationBlock *tb,
887 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 888{
9fa3e853
FB
889 unsigned int h;
890 TranslationBlock **ptb;
891
892 /* add in the physical hash table */
893 h = tb_phys_hash_func(phys_pc);
894 ptb = &tb_phys_hash[h];
895 tb->phys_hash_next = *ptb;
896 *ptb = tb;
fd6ce8f6
FB
897
898 /* add in the page list */
9fa3e853
FB
899 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
900 if (phys_page2 != -1)
901 tb_alloc_page(tb, 1, phys_page2);
902 else
903 tb->page_addr[1] = -1;
9fa3e853 904
d4e8164f
FB
905 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
906 tb->jmp_next[0] = NULL;
907 tb->jmp_next[1] = NULL;
b448f2f3
FB
908#ifdef USE_CODE_COPY
909 tb->cflags &= ~CF_FP_USED;
910 if (tb->cflags & CF_TB_FP_USED)
911 tb->cflags |= CF_FP_USED;
912#endif
d4e8164f
FB
913
914 /* init original jump addresses */
915 if (tb->tb_next_offset[0] != 0xffff)
916 tb_reset_jump(tb, 0);
917 if (tb->tb_next_offset[1] != 0xffff)
918 tb_reset_jump(tb, 1);
8a40a180
FB
919
920#ifdef DEBUG_TB_CHECK
921 tb_page_check();
922#endif
fd6ce8f6
FB
923}
924
9fa3e853
FB
925/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
926 tb[1].tc_ptr. Return NULL if not found */
927TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 928{
9fa3e853
FB
929 int m_min, m_max, m;
930 unsigned long v;
931 TranslationBlock *tb;
a513fe19
FB
932
933 if (nb_tbs <= 0)
934 return NULL;
935 if (tc_ptr < (unsigned long)code_gen_buffer ||
936 tc_ptr >= (unsigned long)code_gen_ptr)
937 return NULL;
938 /* binary search (cf Knuth) */
939 m_min = 0;
940 m_max = nb_tbs - 1;
941 while (m_min <= m_max) {
942 m = (m_min + m_max) >> 1;
943 tb = &tbs[m];
944 v = (unsigned long)tb->tc_ptr;
945 if (v == tc_ptr)
946 return tb;
947 else if (tc_ptr < v) {
948 m_max = m - 1;
949 } else {
950 m_min = m + 1;
951 }
952 }
953 return &tbs[m_max];
954}
7501267e 955
ea041c0e
FB
956static void tb_reset_jump_recursive(TranslationBlock *tb);
957
958static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
959{
960 TranslationBlock *tb1, *tb_next, **ptb;
961 unsigned int n1;
962
963 tb1 = tb->jmp_next[n];
964 if (tb1 != NULL) {
965 /* find head of list */
966 for(;;) {
967 n1 = (long)tb1 & 3;
968 tb1 = (TranslationBlock *)((long)tb1 & ~3);
969 if (n1 == 2)
970 break;
971 tb1 = tb1->jmp_next[n1];
972 }
973 /* we are now sure now that tb jumps to tb1 */
974 tb_next = tb1;
975
976 /* remove tb from the jmp_first list */
977 ptb = &tb_next->jmp_first;
978 for(;;) {
979 tb1 = *ptb;
980 n1 = (long)tb1 & 3;
981 tb1 = (TranslationBlock *)((long)tb1 & ~3);
982 if (n1 == n && tb1 == tb)
983 break;
984 ptb = &tb1->jmp_next[n1];
985 }
986 *ptb = tb->jmp_next[n];
987 tb->jmp_next[n] = NULL;
988
989 /* suppress the jump to next tb in generated code */
990 tb_reset_jump(tb, n);
991
0124311e 992 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
993 tb_reset_jump_recursive(tb_next);
994 }
995}
996
997static void tb_reset_jump_recursive(TranslationBlock *tb)
998{
999 tb_reset_jump_recursive2(tb, 0);
1000 tb_reset_jump_recursive2(tb, 1);
1001}
1002
1fddef4b 1003#if defined(TARGET_HAS_ICE)
d720b93d
FB
1004static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1005{
c2f07f81
PB
1006 target_ulong addr, pd;
1007 ram_addr_t ram_addr;
1008 PhysPageDesc *p;
d720b93d 1009
c2f07f81
PB
1010 addr = cpu_get_phys_page_debug(env, pc);
1011 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1012 if (!p) {
1013 pd = IO_MEM_UNASSIGNED;
1014 } else {
1015 pd = p->phys_offset;
1016 }
1017 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1018 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1019}
c27004ec 1020#endif
d720b93d 1021
c33a346e
FB
1022/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1023 breakpoint is reached */
2e12669a 1024int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1025{
1fddef4b 1026#if defined(TARGET_HAS_ICE)
4c3a88a2 1027 int i;
d720b93d 1028
4c3a88a2
FB
1029 for(i = 0; i < env->nb_breakpoints; i++) {
1030 if (env->breakpoints[i] == pc)
1031 return 0;
1032 }
1033
1034 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1035 return -1;
1036 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1037
1038 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1039 return 0;
1040#else
1041 return -1;
1042#endif
1043}
1044
1045/* remove a breakpoint */
2e12669a 1046int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1047{
1fddef4b 1048#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1049 int i;
1050 for(i = 0; i < env->nb_breakpoints; i++) {
1051 if (env->breakpoints[i] == pc)
1052 goto found;
1053 }
1054 return -1;
1055 found:
4c3a88a2 1056 env->nb_breakpoints--;
1fddef4b
FB
1057 if (i < env->nb_breakpoints)
1058 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1059
1060 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1061 return 0;
1062#else
1063 return -1;
1064#endif
1065}
1066
c33a346e
FB
1067/* enable or disable single step mode. EXCP_DEBUG is returned by the
1068 CPU loop after each instruction */
1069void cpu_single_step(CPUState *env, int enabled)
1070{
1fddef4b 1071#if defined(TARGET_HAS_ICE)
c33a346e
FB
1072 if (env->singlestep_enabled != enabled) {
1073 env->singlestep_enabled = enabled;
1074 /* must flush all the translated code to avoid inconsistancies */
9fa3e853 1075 /* XXX: only flush what is necessary */
0124311e 1076 tb_flush(env);
c33a346e
FB
1077 }
1078#endif
1079}
1080
34865134
FB
1081/* enable or disable low levels log */
1082void cpu_set_log(int log_flags)
1083{
1084 loglevel = log_flags;
1085 if (loglevel && !logfile) {
1086 logfile = fopen(logfilename, "w");
1087 if (!logfile) {
1088 perror(logfilename);
1089 _exit(1);
1090 }
9fa3e853
FB
1091#if !defined(CONFIG_SOFTMMU)
1092 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1093 {
1094 static uint8_t logfile_buf[4096];
1095 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1096 }
1097#else
34865134 1098 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1099#endif
34865134
FB
1100 }
1101}
1102
1103void cpu_set_log_filename(const char *filename)
1104{
1105 logfilename = strdup(filename);
1106}
c33a346e 1107
0124311e 1108/* mask must never be zero, except for A20 change call */
68a79315 1109void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1110{
1111 TranslationBlock *tb;
ee8b7021 1112 static int interrupt_lock;
59817ccb 1113
68a79315 1114 env->interrupt_request |= mask;
ea041c0e
FB
1115 /* if the cpu is currently executing code, we must unlink it and
1116 all the potentially executing TB */
1117 tb = env->current_tb;
ee8b7021
FB
1118 if (tb && !testandset(&interrupt_lock)) {
1119 env->current_tb = NULL;
ea041c0e 1120 tb_reset_jump_recursive(tb);
ee8b7021 1121 interrupt_lock = 0;
ea041c0e
FB
1122 }
1123}
1124
b54ad049
FB
1125void cpu_reset_interrupt(CPUState *env, int mask)
1126{
1127 env->interrupt_request &= ~mask;
1128}
1129
f193c797
FB
1130CPULogItem cpu_log_items[] = {
1131 { CPU_LOG_TB_OUT_ASM, "out_asm",
1132 "show generated host assembly code for each compiled TB" },
1133 { CPU_LOG_TB_IN_ASM, "in_asm",
1134 "show target assembly code for each compiled TB" },
1135 { CPU_LOG_TB_OP, "op",
1136 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1137#ifdef TARGET_I386
1138 { CPU_LOG_TB_OP_OPT, "op_opt",
1139 "show micro ops after optimization for each compiled TB" },
1140#endif
1141 { CPU_LOG_INT, "int",
1142 "show interrupts/exceptions in short format" },
1143 { CPU_LOG_EXEC, "exec",
1144 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1145 { CPU_LOG_TB_CPU, "cpu",
1146 "show CPU state before bloc translation" },
f193c797
FB
1147#ifdef TARGET_I386
1148 { CPU_LOG_PCALL, "pcall",
1149 "show protected mode far calls/returns/exceptions" },
1150#endif
8e3a9fd2 1151#ifdef DEBUG_IOPORT
fd872598
FB
1152 { CPU_LOG_IOPORT, "ioport",
1153 "show all i/o ports accesses" },
8e3a9fd2 1154#endif
f193c797
FB
1155 { 0, NULL, NULL },
1156};
1157
1158static int cmp1(const char *s1, int n, const char *s2)
1159{
1160 if (strlen(s2) != n)
1161 return 0;
1162 return memcmp(s1, s2, n) == 0;
1163}
1164
1165/* takes a comma separated list of log masks. Return 0 if error. */
1166int cpu_str_to_log_mask(const char *str)
1167{
1168 CPULogItem *item;
1169 int mask;
1170 const char *p, *p1;
1171
1172 p = str;
1173 mask = 0;
1174 for(;;) {
1175 p1 = strchr(p, ',');
1176 if (!p1)
1177 p1 = p + strlen(p);
8e3a9fd2
FB
1178 if(cmp1(p,p1-p,"all")) {
1179 for(item = cpu_log_items; item->mask != 0; item++) {
1180 mask |= item->mask;
1181 }
1182 } else {
f193c797
FB
1183 for(item = cpu_log_items; item->mask != 0; item++) {
1184 if (cmp1(p, p1 - p, item->name))
1185 goto found;
1186 }
1187 return 0;
8e3a9fd2 1188 }
f193c797
FB
1189 found:
1190 mask |= item->mask;
1191 if (*p1 != ',')
1192 break;
1193 p = p1 + 1;
1194 }
1195 return mask;
1196}
ea041c0e 1197
7501267e
FB
1198void cpu_abort(CPUState *env, const char *fmt, ...)
1199{
1200 va_list ap;
1201
1202 va_start(ap, fmt);
1203 fprintf(stderr, "qemu: fatal: ");
1204 vfprintf(stderr, fmt, ap);
1205 fprintf(stderr, "\n");
1206#ifdef TARGET_I386
7fe48483
FB
1207 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1208#else
1209 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1210#endif
1211 va_end(ap);
1212 abort();
1213}
1214
0124311e
FB
1215#if !defined(CONFIG_USER_ONLY)
1216
ee8b7021
FB
1217/* NOTE: if flush_global is true, also flush global entries (not
1218 implemented yet) */
1219void tlb_flush(CPUState *env, int flush_global)
33417e70 1220{
33417e70 1221 int i;
0124311e 1222
9fa3e853
FB
1223#if defined(DEBUG_TLB)
1224 printf("tlb_flush:\n");
1225#endif
0124311e
FB
1226 /* must reset current TB so that interrupts cannot modify the
1227 links while we are modifying them */
1228 env->current_tb = NULL;
1229
33417e70 1230 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1231 env->tlb_table[0][i].addr_read = -1;
1232 env->tlb_table[0][i].addr_write = -1;
1233 env->tlb_table[0][i].addr_code = -1;
1234 env->tlb_table[1][i].addr_read = -1;
1235 env->tlb_table[1][i].addr_write = -1;
1236 env->tlb_table[1][i].addr_code = -1;
33417e70 1237 }
9fa3e853 1238
8a40a180 1239 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1240
1241#if !defined(CONFIG_SOFTMMU)
1242 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1243#endif
1244#ifdef USE_KQEMU
1245 if (env->kqemu_enabled) {
1246 kqemu_flush(env, flush_global);
1247 }
9fa3e853 1248#endif
e3db7226 1249 tlb_flush_count++;
33417e70
FB
1250}
1251
274da6b2 1252static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1253{
84b7b8e7
FB
1254 if (addr == (tlb_entry->addr_read &
1255 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1256 addr == (tlb_entry->addr_write &
1257 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1258 addr == (tlb_entry->addr_code &
1259 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1260 tlb_entry->addr_read = -1;
1261 tlb_entry->addr_write = -1;
1262 tlb_entry->addr_code = -1;
1263 }
61382a50
FB
1264}
1265
2e12669a 1266void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1267{
8a40a180 1268 int i;
9fa3e853 1269 TranslationBlock *tb;
0124311e 1270
9fa3e853 1271#if defined(DEBUG_TLB)
108c49b8 1272 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1273#endif
0124311e
FB
1274 /* must reset current TB so that interrupts cannot modify the
1275 links while we are modifying them */
1276 env->current_tb = NULL;
61382a50
FB
1277
1278 addr &= TARGET_PAGE_MASK;
1279 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1280 tlb_flush_entry(&env->tlb_table[0][i], addr);
1281 tlb_flush_entry(&env->tlb_table[1][i], addr);
0124311e 1282
8a40a180
FB
1283 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1284 tb = env->tb_jmp_cache[i];
1285 if (tb &&
1286 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1287 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1288 env->tb_jmp_cache[i] = NULL;
9fa3e853
FB
1289 }
1290 }
1291
0124311e 1292#if !defined(CONFIG_SOFTMMU)
9fa3e853 1293 if (addr < MMAP_AREA_END)
0124311e 1294 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1295#endif
0a962c02
FB
1296#ifdef USE_KQEMU
1297 if (env->kqemu_enabled) {
1298 kqemu_flush_page(env, addr);
1299 }
1300#endif
9fa3e853
FB
1301}
1302
9fa3e853
FB
1303/* update the TLBs so that writes to code in the virtual page 'addr'
1304 can be detected */
6a00d601 1305static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1306{
6a00d601
FB
1307 cpu_physical_memory_reset_dirty(ram_addr,
1308 ram_addr + TARGET_PAGE_SIZE,
1309 CODE_DIRTY_FLAG);
9fa3e853
FB
1310}
1311
9fa3e853 1312/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1313 tested for self modifying code */
1314static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1315 target_ulong vaddr)
9fa3e853 1316{
3a7d929e 1317 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1318}
1319
1320static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1321 unsigned long start, unsigned long length)
1322{
1323 unsigned long addr;
84b7b8e7
FB
1324 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1325 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1326 if ((addr - start) < length) {
84b7b8e7 1327 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1328 }
1329 }
1330}
1331
3a7d929e 1332void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1333 int dirty_flags)
1ccde1cb
FB
1334{
1335 CPUState *env;
4f2ac237 1336 unsigned long length, start1;
0a962c02
FB
1337 int i, mask, len;
1338 uint8_t *p;
1ccde1cb
FB
1339
1340 start &= TARGET_PAGE_MASK;
1341 end = TARGET_PAGE_ALIGN(end);
1342
1343 length = end - start;
1344 if (length == 0)
1345 return;
0a962c02 1346 len = length >> TARGET_PAGE_BITS;
3a7d929e 1347#ifdef USE_KQEMU
6a00d601
FB
1348 /* XXX: should not depend on cpu context */
1349 env = first_cpu;
3a7d929e 1350 if (env->kqemu_enabled) {
f23db169
FB
1351 ram_addr_t addr;
1352 addr = start;
1353 for(i = 0; i < len; i++) {
1354 kqemu_set_notdirty(env, addr);
1355 addr += TARGET_PAGE_SIZE;
1356 }
3a7d929e
FB
1357 }
1358#endif
f23db169
FB
1359 mask = ~dirty_flags;
1360 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1361 for(i = 0; i < len; i++)
1362 p[i] &= mask;
1363
1ccde1cb
FB
1364 /* we modify the TLB cache so that the dirty bit will be set again
1365 when accessing the range */
59817ccb 1366 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1367 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1368 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1369 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1370 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1371 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1372 }
59817ccb
FB
1373
1374#if !defined(CONFIG_SOFTMMU)
1375 /* XXX: this is expensive */
1376 {
1377 VirtPageDesc *p;
1378 int j;
1379 target_ulong addr;
1380
1381 for(i = 0; i < L1_SIZE; i++) {
1382 p = l1_virt_map[i];
1383 if (p) {
1384 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1385 for(j = 0; j < L2_SIZE; j++) {
1386 if (p->valid_tag == virt_valid_tag &&
1387 p->phys_addr >= start && p->phys_addr < end &&
1388 (p->prot & PROT_WRITE)) {
1389 if (addr < MMAP_AREA_END) {
1390 mprotect((void *)addr, TARGET_PAGE_SIZE,
1391 p->prot & ~PROT_WRITE);
1392 }
1393 }
1394 addr += TARGET_PAGE_SIZE;
1395 p++;
1396 }
1397 }
1398 }
1399 }
1400#endif
1ccde1cb
FB
1401}
1402
3a7d929e
FB
1403static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1404{
1405 ram_addr_t ram_addr;
1406
84b7b8e7
FB
1407 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1408 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1409 tlb_entry->addend - (unsigned long)phys_ram_base;
1410 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1411 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1412 }
1413 }
1414}
1415
1416/* update the TLB according to the current state of the dirty bits */
1417void cpu_tlb_update_dirty(CPUState *env)
1418{
1419 int i;
1420 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1421 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1422 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1423 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1424}
1425
1ccde1cb 1426static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1427 unsigned long start)
1ccde1cb
FB
1428{
1429 unsigned long addr;
84b7b8e7
FB
1430 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1431 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1432 if (addr == start) {
84b7b8e7 1433 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1434 }
1435 }
1436}
1437
1438/* update the TLB corresponding to virtual page vaddr and phys addr
1439 addr so that it is no longer dirty */
6a00d601
FB
1440static inline void tlb_set_dirty(CPUState *env,
1441 unsigned long addr, target_ulong vaddr)
1ccde1cb 1442{
1ccde1cb
FB
1443 int i;
1444
1ccde1cb
FB
1445 addr &= TARGET_PAGE_MASK;
1446 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1447 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1448 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1449}
1450
59817ccb
FB
1451/* add a new TLB entry. At most one entry for a given virtual address
1452 is permitted. Return 0 if OK or 2 if the page could not be mapped
1453 (can only happen in non SOFTMMU mode for I/O pages or pages
1454 conflicting with the host address space). */
84b7b8e7
FB
1455int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1456 target_phys_addr_t paddr, int prot,
1457 int is_user, int is_softmmu)
9fa3e853 1458{
92e873b9 1459 PhysPageDesc *p;
4f2ac237 1460 unsigned long pd;
9fa3e853 1461 unsigned int index;
4f2ac237 1462 target_ulong address;
108c49b8 1463 target_phys_addr_t addend;
9fa3e853 1464 int ret;
84b7b8e7 1465 CPUTLBEntry *te;
9fa3e853 1466
92e873b9 1467 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1468 if (!p) {
1469 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1470 } else {
1471 pd = p->phys_offset;
9fa3e853
FB
1472 }
1473#if defined(DEBUG_TLB)
3a7d929e 1474 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1475 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1476#endif
1477
1478 ret = 0;
1479#if !defined(CONFIG_SOFTMMU)
1480 if (is_softmmu)
1481#endif
1482 {
1483 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1484 /* IO memory case */
1485 address = vaddr | pd;
1486 addend = paddr;
1487 } else {
1488 /* standard memory */
1489 address = vaddr;
1490 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1491 }
1492
90f18422 1493 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1494 addend -= vaddr;
84b7b8e7
FB
1495 te = &env->tlb_table[is_user][index];
1496 te->addend = addend;
67b915a5 1497 if (prot & PAGE_READ) {
84b7b8e7
FB
1498 te->addr_read = address;
1499 } else {
1500 te->addr_read = -1;
1501 }
1502 if (prot & PAGE_EXEC) {
1503 te->addr_code = address;
9fa3e853 1504 } else {
84b7b8e7 1505 te->addr_code = -1;
9fa3e853 1506 }
67b915a5 1507 if (prot & PAGE_WRITE) {
9fa3e853
FB
1508 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1509 /* ROM: access is ignored (same as unassigned) */
84b7b8e7 1510 te->addr_write = vaddr | IO_MEM_ROM;
3a7d929e 1511 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1512 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1513 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1514 } else {
84b7b8e7 1515 te->addr_write = address;
9fa3e853
FB
1516 }
1517 } else {
84b7b8e7 1518 te->addr_write = -1;
9fa3e853
FB
1519 }
1520 }
1521#if !defined(CONFIG_SOFTMMU)
1522 else {
1523 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1524 /* IO access: no mapping is done as it will be handled by the
1525 soft MMU */
1526 if (!(env->hflags & HF_SOFTMMU_MASK))
1527 ret = 2;
1528 } else {
1529 void *map_addr;
59817ccb
FB
1530
1531 if (vaddr >= MMAP_AREA_END) {
1532 ret = 2;
1533 } else {
1534 if (prot & PROT_WRITE) {
1535 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1536#if defined(TARGET_HAS_SMC) || 1
59817ccb 1537 first_tb ||
d720b93d 1538#endif
59817ccb
FB
1539 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1540 !cpu_physical_memory_is_dirty(pd))) {
1541 /* ROM: we do as if code was inside */
1542 /* if code is present, we only map as read only and save the
1543 original mapping */
1544 VirtPageDesc *vp;
1545
90f18422 1546 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1547 vp->phys_addr = pd;
1548 vp->prot = prot;
1549 vp->valid_tag = virt_valid_tag;
1550 prot &= ~PAGE_WRITE;
1551 }
1552 }
1553 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1554 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1555 if (map_addr == MAP_FAILED) {
1556 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1557 paddr, vaddr);
9fa3e853 1558 }
9fa3e853
FB
1559 }
1560 }
1561 }
1562#endif
1563 return ret;
1564}
1565
1566/* called from signal handler: invalidate the code and unprotect the
1567 page. Return TRUE if the fault was succesfully handled. */
53a5960a 1568int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1569{
1570#if !defined(CONFIG_SOFTMMU)
1571 VirtPageDesc *vp;
1572
1573#if defined(DEBUG_TLB)
1574 printf("page_unprotect: addr=0x%08x\n", addr);
1575#endif
1576 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1577
1578 /* if it is not mapped, no need to worry here */
1579 if (addr >= MMAP_AREA_END)
1580 return 0;
9fa3e853
FB
1581 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1582 if (!vp)
1583 return 0;
1584 /* NOTE: in this case, validate_tag is _not_ tested as it
1585 validates only the code TLB */
1586 if (vp->valid_tag != virt_valid_tag)
1587 return 0;
1588 if (!(vp->prot & PAGE_WRITE))
1589 return 0;
1590#if defined(DEBUG_TLB)
1591 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1592 addr, vp->phys_addr, vp->prot);
1593#endif
59817ccb
FB
1594 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1595 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1596 (unsigned long)addr, vp->prot);
d720b93d 1597 /* set the dirty bit */
0a962c02 1598 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1599 /* flush the code inside */
1600 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1601 return 1;
1602#else
1603 return 0;
1604#endif
33417e70
FB
1605}
1606
0124311e
FB
1607#else
1608
ee8b7021 1609void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1610{
1611}
1612
2e12669a 1613void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1614{
1615}
1616
84b7b8e7
FB
1617int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1618 target_phys_addr_t paddr, int prot,
1619 int is_user, int is_softmmu)
9fa3e853
FB
1620{
1621 return 0;
1622}
0124311e 1623
9fa3e853
FB
1624/* dump memory mappings */
1625void page_dump(FILE *f)
33417e70 1626{
9fa3e853
FB
1627 unsigned long start, end;
1628 int i, j, prot, prot1;
1629 PageDesc *p;
33417e70 1630
9fa3e853
FB
1631 fprintf(f, "%-8s %-8s %-8s %s\n",
1632 "start", "end", "size", "prot");
1633 start = -1;
1634 end = -1;
1635 prot = 0;
1636 for(i = 0; i <= L1_SIZE; i++) {
1637 if (i < L1_SIZE)
1638 p = l1_map[i];
1639 else
1640 p = NULL;
1641 for(j = 0;j < L2_SIZE; j++) {
1642 if (!p)
1643 prot1 = 0;
1644 else
1645 prot1 = p[j].flags;
1646 if (prot1 != prot) {
1647 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1648 if (start != -1) {
1649 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1650 start, end, end - start,
1651 prot & PAGE_READ ? 'r' : '-',
1652 prot & PAGE_WRITE ? 'w' : '-',
1653 prot & PAGE_EXEC ? 'x' : '-');
1654 }
1655 if (prot1 != 0)
1656 start = end;
1657 else
1658 start = -1;
1659 prot = prot1;
1660 }
1661 if (!p)
1662 break;
1663 }
33417e70 1664 }
33417e70
FB
1665}
1666
53a5960a 1667int page_get_flags(target_ulong address)
33417e70 1668{
9fa3e853
FB
1669 PageDesc *p;
1670
1671 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1672 if (!p)
9fa3e853
FB
1673 return 0;
1674 return p->flags;
1675}
1676
1677/* modify the flags of a page and invalidate the code if
1678 necessary. The flag PAGE_WRITE_ORG is positionned automatically
1679 depending on PAGE_WRITE */
53a5960a 1680void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1681{
1682 PageDesc *p;
53a5960a 1683 target_ulong addr;
9fa3e853
FB
1684
1685 start = start & TARGET_PAGE_MASK;
1686 end = TARGET_PAGE_ALIGN(end);
1687 if (flags & PAGE_WRITE)
1688 flags |= PAGE_WRITE_ORG;
1689 spin_lock(&tb_lock);
1690 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1691 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1692 /* if the write protection is set, then we invalidate the code
1693 inside */
1694 if (!(p->flags & PAGE_WRITE) &&
1695 (flags & PAGE_WRITE) &&
1696 p->first_tb) {
d720b93d 1697 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1698 }
1699 p->flags = flags;
1700 }
1701 spin_unlock(&tb_lock);
33417e70
FB
1702}
1703
9fa3e853
FB
1704/* called from signal handler: invalidate the code and unprotect the
1705 page. Return TRUE if the fault was succesfully handled. */
53a5960a 1706int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1707{
1708 unsigned int page_index, prot, pindex;
1709 PageDesc *p, *p1;
53a5960a 1710 target_ulong host_start, host_end, addr;
9fa3e853 1711
83fb7adf 1712 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1713 page_index = host_start >> TARGET_PAGE_BITS;
1714 p1 = page_find(page_index);
1715 if (!p1)
1716 return 0;
83fb7adf 1717 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1718 p = p1;
1719 prot = 0;
1720 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1721 prot |= p->flags;
1722 p++;
1723 }
1724 /* if the page was really writable, then we change its
1725 protection back to writable */
1726 if (prot & PAGE_WRITE_ORG) {
1727 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1728 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1729 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1730 (prot & PAGE_BITS) | PAGE_WRITE);
1731 p1[pindex].flags |= PAGE_WRITE;
1732 /* and since the content will be modified, we must invalidate
1733 the corresponding translated code. */
d720b93d 1734 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1735#ifdef DEBUG_TB_CHECK
1736 tb_invalidate_check(address);
1737#endif
1738 return 1;
1739 }
1740 }
1741 return 0;
1742}
1743
1744/* call this function when system calls directly modify a memory area */
53a5960a
PB
1745/* ??? This should be redundant now we have lock_user. */
1746void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1747{
53a5960a 1748 target_ulong start, end, addr;
9fa3e853 1749
53a5960a 1750 start = data;
9fa3e853
FB
1751 end = start + data_size;
1752 start &= TARGET_PAGE_MASK;
1753 end = TARGET_PAGE_ALIGN(end);
1754 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1755 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1756 }
1757}
1758
6a00d601
FB
1759static inline void tlb_set_dirty(CPUState *env,
1760 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1761{
1762}
9fa3e853
FB
1763#endif /* defined(CONFIG_USER_ONLY) */
1764
33417e70
FB
1765/* register physical memory. 'size' must be a multiple of the target
1766 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1767 io memory page */
2e12669a
FB
1768void cpu_register_physical_memory(target_phys_addr_t start_addr,
1769 unsigned long size,
1770 unsigned long phys_offset)
33417e70 1771{
108c49b8 1772 target_phys_addr_t addr, end_addr;
92e873b9 1773 PhysPageDesc *p;
33417e70 1774
5fd386f6 1775 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1776 end_addr = start_addr + size;
5fd386f6 1777 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1778 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853
FB
1779 p->phys_offset = phys_offset;
1780 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1781 phys_offset += TARGET_PAGE_SIZE;
1782 }
1783}
1784
a4193c8a 1785static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70
FB
1786{
1787 return 0;
1788}
1789
a4193c8a 1790static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70
FB
1791{
1792}
1793
1794static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1795 unassigned_mem_readb,
1796 unassigned_mem_readb,
1797 unassigned_mem_readb,
1798};
1799
1800static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1801 unassigned_mem_writeb,
1802 unassigned_mem_writeb,
1803 unassigned_mem_writeb,
1804};
1805
3a7d929e 1806static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1807{
3a7d929e
FB
1808 unsigned long ram_addr;
1809 int dirty_flags;
1810 ram_addr = addr - (unsigned long)phys_ram_base;
1811 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1812 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1813#if !defined(CONFIG_USER_ONLY)
3a7d929e 1814 tb_invalidate_phys_page_fast(ram_addr, 1);
1815 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1816#endif
3a7d929e 1817 }
c27004ec 1818 stb_p((uint8_t *)(long)addr, val);
f32fc648 1819#ifdef USE_KQEMU
1820 if (cpu_single_env->kqemu_enabled &&
1821 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1822 kqemu_modify_page(cpu_single_env, ram_addr);
1823#endif
f23db169 1824 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1825 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1826 /* we remove the notdirty callback only if the code has been
1827 flushed */
1828 if (dirty_flags == 0xff)
6a00d601 1829 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853 1830}
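
The other side of this protocol lives in device models: something like a display adapter polls the per-page dirty byte and clears only its own flag. A hedged sketch using the generic dirty-memory helpers (the device and its loop are illustrative):

/* Sketch: redraw only guest pages touched since the last refresh.
   fb_ram_addr is the ram offset (into phys_ram_base) of the frame buffer. */
static void example_update_display(unsigned long fb_ram_addr,
                                   unsigned long fb_size)
{
    unsigned long off;

    for (off = 0; off < fb_size; off += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(fb_ram_addr + off, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* Re-arm dirty tracking for the next refresh. */
    cpu_physical_memory_reset_dirty(fb_ram_addr, fb_ram_addr + fb_size,
                                    VGA_DIRTY_FLAG);
}
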
1831
3a7d929e 1832static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1833{
3a7d929e 1834 unsigned long ram_addr;
1835 int dirty_flags;
1836 ram_addr = addr - (unsigned long)phys_ram_base;
1837 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1838 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1839#if !defined(CONFIG_USER_ONLY)
3a7d929e 1840 tb_invalidate_phys_page_fast(ram_addr, 2);
1841 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1842#endif
3a7d929e 1843 }
c27004ec 1844 stw_p((uint8_t *)(long)addr, val);
f32fc648 1845#ifdef USE_KQEMU
1846 if (cpu_single_env->kqemu_enabled &&
1847 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1848 kqemu_modify_page(cpu_single_env, ram_addr);
1849#endif
f23db169 1850 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1851 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1852 /* we remove the notdirty callback only if the code has been
1853 flushed */
1854 if (dirty_flags == 0xff)
6a00d601 1855 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853 1856}
1857
3a7d929e 1858static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1859{
3a7d929e 1860 unsigned long ram_addr;
1861 int dirty_flags;
1862 ram_addr = addr - (unsigned long)phys_ram_base;
1863 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1864 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1865#if !defined(CONFIG_USER_ONLY)
3a7d929e 1866 tb_invalidate_phys_page_fast(ram_addr, 4);
1867 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1868#endif
3a7d929e 1869 }
c27004ec 1870 stl_p((uint8_t *)(long)addr, val);
f32fc648 1871#ifdef USE_KQEMU
1872 if (cpu_single_env->kqemu_enabled &&
1873 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1874 kqemu_modify_page(cpu_single_env, ram_addr);
1875#endif
f23db169 1876 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1877 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1878 /* we remove the notdirty callback only if the code has been
1879 flushed */
1880 if (dirty_flags == 0xff)
6a00d601 1881 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853 1882}
1883
3a7d929e 1884static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853 1885 NULL, /* never used */
1886 NULL, /* never used */
1887 NULL, /* never used */
1888};
1889
1ccde1cb 1890static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1891 notdirty_mem_writeb,
1892 notdirty_mem_writew,
1893 notdirty_mem_writel,
1894};
1895
33417e70 1896static void io_mem_init(void)
1897{
3a7d929e 1898 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 1899 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 1900 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb 1901 io_mem_nb = 5;
1902
1903 /* alloc dirty bits array */
0a962c02 1904 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 1905 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70 1906}
1907
1908/* mem_read and mem_write are arrays of functions containing the
1909 function to access byte (index 0), word (index 1) and dword (index
1910 2). All functions must be supplied. If io_index is non-zero, the
1911 corresponding I/O zone is modified. If it is zero, a new I/O zone is
1912 allocated. The return value can be used with
1913 cpu_register_physical_memory(). -1 is returned on error. */
1914int cpu_register_io_memory(int io_index,
1915 CPUReadMemoryFunc **mem_read,
a4193c8a 1916 CPUWriteMemoryFunc **mem_write,
1917 void *opaque)
33417e70 1918{
1919 int i;
1920
1921 if (io_index <= 0) {
b5ff1b31 1922 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70 1923 return -1;
1924 io_index = io_mem_nb++;
1925 } else {
1926 if (io_index >= IO_MEM_NB_ENTRIES)
1927 return -1;
1928 }
b5ff1b31 1929
33417e70 1930 for(i = 0;i < 3; i++) {
1931 io_mem_read[io_index][i] = mem_read[i];
1932 io_mem_write[io_index][i] = mem_write[i];
1933 }
a4193c8a 1934 io_mem_opaque[io_index] = opaque;
33417e70 1935 return io_index << IO_MEM_SHIFT;
1936}
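
As a concrete but hypothetical use of the interface described above, a device model supplies one read and one write callback per access size, then maps the returned token with cpu_register_physical_memory(). The device name and address below are made up:

/* Sketch: MMIO registration for an imaginary device. */
static uint32_t mydev_readb(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readw(void *opaque, target_phys_addr_t addr) { return 0; }
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr) { return 0; }
static void mydev_writeb(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writew(void *opaque, target_phys_addr_t addr, uint32_t val) { }
static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_init(void *dev_state)
{
    int iomemtype;

    /* io_index == 0 requests a fresh slot; the return value already has
       IO_MEM_SHIFT applied, so it can be passed as phys_offset below. */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, iomemtype);
}
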
61382a50 1937
8926b517 1938CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1939{
1940 return io_mem_write[io_index >> IO_MEM_SHIFT];
1941}
1942
1943CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1944{
1945 return io_mem_read[io_index >> IO_MEM_SHIFT];
1946}
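
These two accessors expose a slot's callback tables directly; purely as a hypothetical example, a debugging hack could wrap the 32-bit write handler of an already-registered region to trace accesses:

/* Sketch: log every 32-bit write hitting an existing I/O slot.
   io_token is whatever cpu_register_io_memory() returned for it. */
static CPUWriteMemoryFunc *example_orig_writel;

static void example_traced_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    fprintf(stderr, "iowrite32 %08llx = 0x%08x\n",
            (unsigned long long)addr, val);
    example_orig_writel(opaque, addr, val);
}

static void example_trace_io_writes(int io_token)
{
    CPUWriteMemoryFunc **w = cpu_get_io_memory_write(io_token);
    example_orig_writel = w[2];
    w[2] = example_traced_writel;
}
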
1947
13eb76e0 1948/* physical memory access (slow version, mainly for debug) */
1949#if defined(CONFIG_USER_ONLY)
2e12669a 1950void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 1951 int len, int is_write)
1952{
1953 int l, flags;
1954 target_ulong page;
53a5960a 1955 void * p;
13eb76e0 1956
1957 while (len > 0) {
1958 page = addr & TARGET_PAGE_MASK;
1959 l = (page + TARGET_PAGE_SIZE) - addr;
1960 if (l > len)
1961 l = len;
1962 flags = page_get_flags(page);
1963 if (!(flags & PAGE_VALID))
1964 return;
1965 if (is_write) {
1966 if (!(flags & PAGE_WRITE))
1967 return;
53a5960a 1968 p = lock_user(addr, len, 0);
1969 memcpy(p, buf, len);
1970 unlock_user(p, addr, len);
13eb76e0 1971 } else {
1972 if (!(flags & PAGE_READ))
1973 return;
53a5960a 1974 p = lock_user(addr, len, 1);
1975 memcpy(buf, p, len);
1976 unlock_user(p, addr, 0);
13eb76e0 1977 }
1978 len -= l;
1979 buf += l;
1980 addr += l;
1981 }
1982}
8df1cd07 1983
13eb76e0 1984#else
2e12669a 1985void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 1986 int len, int is_write)
1987{
1988 int l, io_index;
1989 uint8_t *ptr;
1990 uint32_t val;
2e12669a 1991 target_phys_addr_t page;
1992 unsigned long pd;
92e873b9 1993 PhysPageDesc *p;
13eb76e0 1994
1995 while (len > 0) {
1996 page = addr & TARGET_PAGE_MASK;
1997 l = (page + TARGET_PAGE_SIZE) - addr;
1998 if (l > len)
1999 l = len;
92e873b9 2000 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0 2001 if (!p) {
2002 pd = IO_MEM_UNASSIGNED;
2003 } else {
2004 pd = p->phys_offset;
2005 }
2006
2007 if (is_write) {
3a7d929e 2008 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2009 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601 2010 /* XXX: could force cpu_single_env to NULL to avoid
2011 potential bugs */
13eb76e0 2012 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2013 /* 32 bit write access */
c27004ec 2014 val = ldl_p(buf);
a4193c8a 2015 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0 2016 l = 4;
2017 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2018 /* 16 bit write access */
c27004ec 2019 val = lduw_p(buf);
a4193c8a 2020 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0 2021 l = 2;
2022 } else {
1c213d19 2023 /* 8 bit write access */
c27004ec 2024 val = ldub_p(buf);
a4193c8a 2025 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0 2026 l = 1;
2027 }
2028 } else {
b448f2f3 2029 unsigned long addr1;
2030 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2031 /* RAM case */
b448f2f3 2032 ptr = phys_ram_base + addr1;
13eb76e0 2033 memcpy(ptr, buf, l);
3a7d929e 2034 if (!cpu_physical_memory_is_dirty(addr1)) {
2035 /* invalidate code */
2036 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2037 /* set dirty bit */
f23db169 2038 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2039 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2040 }
13eb76e0 2041 }
2042 } else {
3a7d929e 2043 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
13eb76e0 2044 /* I/O case */
2045 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2046 if (l >= 4 && ((addr & 3) == 0)) {
2047 /* 32 bit read access */
a4193c8a 2048 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2049 stl_p(buf, val);
13eb76e0 2050 l = 4;
2051 } else if (l >= 2 && ((addr & 1) == 0)) {
2052 /* 16 bit read access */
a4193c8a 2053 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2054 stw_p(buf, val);
13eb76e0 2055 l = 2;
2056 } else {
1c213d19 2057 /* 8 bit read access */
a4193c8a 2058 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2059 stb_p(buf, val);
13eb76e0 2060 l = 1;
2061 }
2062 } else {
2063 /* RAM case */
2064 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2065 (addr & ~TARGET_PAGE_MASK);
2066 memcpy(buf, ptr, l);
2067 }
2068 }
2069 len -= l;
2070 buf += l;
2071 addr += l;
2072 }
2073}
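
A hedged usage sketch: device emulation that performs DMA simply hands a host buffer to cpu_physical_memory_rw(), which splits the transfer per page and routes each chunk either to RAM (with TB invalidation and dirty-bit updates) or to the registered I/O callbacks. The wrapper below is illustrative:

/* Sketch: DMA completion copying a host buffer into guest memory
   (the device itself is imaginary). */
static void example_dma_write(target_phys_addr_t guest_dst,
                              const uint8_t *host_buf, int len)
{
    cpu_physical_memory_rw(guest_dst, (uint8_t *)host_buf, len, 1 /* write */);
}
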
8df1cd07 2074
2075/* warning: addr must be aligned */
2076uint32_t ldl_phys(target_phys_addr_t addr)
2077{
2078 int io_index;
2079 uint8_t *ptr;
2080 uint32_t val;
2081 unsigned long pd;
2082 PhysPageDesc *p;
2083
2084 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2085 if (!p) {
2086 pd = IO_MEM_UNASSIGNED;
2087 } else {
2088 pd = p->phys_offset;
2089 }
2090
3a7d929e 2091 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
8df1cd07 2092 /* I/O case */
2093 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2094 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2095 } else {
2096 /* RAM case */
2097 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2098 (addr & ~TARGET_PAGE_MASK);
2099 val = ldl_p(ptr);
2100 }
2101 return val;
2102}
2103
84b7b8e7 2104/* warning: addr must be aligned */
2105uint64_t ldq_phys(target_phys_addr_t addr)
2106{
2107 int io_index;
2108 uint8_t *ptr;
2109 uint64_t val;
2110 unsigned long pd;
2111 PhysPageDesc *p;
2112
2113 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2114 if (!p) {
2115 pd = IO_MEM_UNASSIGNED;
2116 } else {
2117 pd = p->phys_offset;
2118 }
2119
2120 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2121 /* I/O case */
2122 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2123#ifdef TARGET_WORDS_BIGENDIAN
2124 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2125 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2126#else
2127 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2128 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2129#endif
2130 } else {
2131 /* RAM case */
2132 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2133 (addr & ~TARGET_PAGE_MASK);
2134 val = ldq_p(ptr);
2135 }
2136 return val;
2137}
2138
aab33094 2139/* XXX: optimize */
2140uint32_t ldub_phys(target_phys_addr_t addr)
2141{
2142 uint8_t val;
2143 cpu_physical_memory_read(addr, &val, 1);
2144 return val;
2145}
2146
2147/* XXX: optimize */
2148uint32_t lduw_phys(target_phys_addr_t addr)
2149{
2150 uint16_t val;
2151 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2152 return tswap16(val);
2153}
2154
8df1cd07 2155/* warning: addr must be aligned. The ram page is not marked as dirty
2156 and the code inside is not invalidated. It is useful if the dirty
2157 bits are used to track modified PTEs */
2158void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2159{
2160 int io_index;
2161 uint8_t *ptr;
2162 unsigned long pd;
2163 PhysPageDesc *p;
2164
2165 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2166 if (!p) {
2167 pd = IO_MEM_UNASSIGNED;
2168 } else {
2169 pd = p->phys_offset;
2170 }
2171
3a7d929e 2172 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 2173 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2174 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2175 } else {
2176 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2177 (addr & ~TARGET_PAGE_MASK);
2178 stl_p(ptr, val);
2179 }
2180}
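
The comment above hints at the intended caller: target MMU code that updates accessed/dirty bits in guest page tables wants the store to reach RAM without flagging the page for code invalidation. A sketch under that assumption (EXAMPLE_PTE_ACCESSED is a stand-in for the target-specific bit):

/* Sketch: set the "accessed" bit of a 32-bit page table entry. */
#define EXAMPLE_PTE_ACCESSED 0x20

static void example_pte_mark_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        pte |= EXAMPLE_PTE_ACCESSED;
        /* notdirty variant: the write lands in RAM but CODE_DIRTY
           tracking is left untouched, as described above. */
        stl_phys_notdirty(pte_addr, pte);
    }
}
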
2181
2182/* warning: addr must be aligned */
8df1cd07 2183void stl_phys(target_phys_addr_t addr, uint32_t val)
2184{
2185 int io_index;
2186 uint8_t *ptr;
2187 unsigned long pd;
2188 PhysPageDesc *p;
2189
2190 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2191 if (!p) {
2192 pd = IO_MEM_UNASSIGNED;
2193 } else {
2194 pd = p->phys_offset;
2195 }
2196
3a7d929e 2197 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 2198 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2199 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2200 } else {
2201 unsigned long addr1;
2202 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2203 /* RAM case */
2204 ptr = phys_ram_base + addr1;
2205 stl_p(ptr, val);
3a7d929e 2206 if (!cpu_physical_memory_is_dirty(addr1)) {
2207 /* invalidate code */
2208 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2209 /* set dirty bit */
f23db169 2210 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2211 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2212 }
8df1cd07 2213 }
2214}
2215
aab33094 2216/* XXX: optimize */
2217void stb_phys(target_phys_addr_t addr, uint32_t val)
2218{
2219 uint8_t v = val;
2220 cpu_physical_memory_write(addr, &v, 1);
2221}
2222
2223/* XXX: optimize */
2224void stw_phys(target_phys_addr_t addr, uint32_t val)
2225{
2226 uint16_t v = tswap16(val);
2227 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2228}
2229
2230/* XXX: optimize */
2231void stq_phys(target_phys_addr_t addr, uint64_t val)
2232{
2233 val = tswap64(val);
2234 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2235}
2236
13eb76e0 2237#endif
2238
2239/* virtual memory access for debug */
b448f2f3 2240int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2241 uint8_t *buf, int len, int is_write)
13eb76e0 2242{
2243 int l;
2244 target_ulong page, phys_addr;
2245
2246 while (len > 0) {
2247 page = addr & TARGET_PAGE_MASK;
2248 phys_addr = cpu_get_phys_page_debug(env, page);
2249 /* if no physical page mapped, return an error */
2250 if (phys_addr == -1)
2251 return -1;
2252 l = (page + TARGET_PAGE_SIZE) - addr;
2253 if (l > len)
2254 l = len;
b448f2f3 2255 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2256 buf, l, is_write);
13eb76e0 2257 len -= l;
2258 buf += l;
2259 addr += l;
2260 }
2261 return 0;
2262}
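
This is the routine the gdb stub and the monitor go through to read or write guest virtual addresses; a hedged sketch of a caller:

/* Sketch: dump a few bytes at a guest virtual address for a debugger. */
static int example_debug_peek(CPUState *env, target_ulong vaddr, int len)
{
    uint8_t buf[16];

    if (len > (int)sizeof(buf))
        len = sizeof(buf);
    /* Translates page by page via cpu_get_phys_page_debug() and fails
       with -1 if any page is unmapped. */
    if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) < 0)
        return -1;
    /* ... format buf for the user ... */
    return len;
}
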
2263
e3db7226 2264void dump_exec_info(FILE *f,
2265 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2266{
2267 int i, target_code_size, max_target_code_size;
2268 int direct_jmp_count, direct_jmp2_count, cross_page;
2269 TranslationBlock *tb;
2270
2271 target_code_size = 0;
2272 max_target_code_size = 0;
2273 cross_page = 0;
2274 direct_jmp_count = 0;
2275 direct_jmp2_count = 0;
2276 for(i = 0; i < nb_tbs; i++) {
2277 tb = &tbs[i];
2278 target_code_size += tb->size;
2279 if (tb->size > max_target_code_size)
2280 max_target_code_size = tb->size;
2281 if (tb->page_addr[1] != -1)
2282 cross_page++;
2283 if (tb->tb_next_offset[0] != 0xffff) {
2284 direct_jmp_count++;
2285 if (tb->tb_next_offset[1] != 0xffff) {
2286 direct_jmp2_count++;
2287 }
2288 }
2289 }
2290 /* XXX: avoid using doubles? */
2291 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2292 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2293 nb_tbs ? target_code_size / nb_tbs : 0,
2294 max_target_code_size);
2295 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2296 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2297 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2298 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2299 cross_page,
2300 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2301 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2302 direct_jmp_count,
2303 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2304 direct_jmp2_count,
2305 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2306 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2307 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2308 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2309}
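
dump_exec_info() only needs a stdio-like callback, so it can be pointed at any stream; for instance (illustrative):

/* Sketch: print the translation statistics to stderr, e.g. at exit. */
static void example_report_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
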
2310
61382a50 2311#if !defined(CONFIG_USER_ONLY)
2312
2313#define MMUSUFFIX _cmmu
2314#define GETPC() NULL
2315#define env cpu_single_env
b769d8fe 2316#define SOFTMMU_CODE_ACCESS
61382a50 2317
2318#define SHIFT 0
2319#include "softmmu_template.h"
2320
2321#define SHIFT 1
2322#include "softmmu_template.h"
2323
2324#define SHIFT 2
2325#include "softmmu_template.h"
2326
2327#define SHIFT 3
2328#include "softmmu_template.h"
2329
2330#undef env
2331
2332#endif