[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
54936004
FB
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
67b915a5 20#include "config.h"
d5a8f07c
FB
21#ifdef _WIN32
22#include <windows.h>
23#else
a98d49b1 24#include <sys/types.h>
d5a8f07c
FB
25#include <sys/mman.h>
26#endif
54936004
FB
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
6180a181
FB
35#include "cpu.h"
36#include "exec-all.h"
53a5960a
PB
37#if defined(CONFIG_USER_ONLY)
38#include <qemu.h>
39#endif
54936004 40
fd6ce8f6 41//#define DEBUG_TB_INVALIDATE
66e85a21 42//#define DEBUG_FLUSH
9fa3e853 43//#define DEBUG_TLB
fd6ce8f6
FB
44
45/* make various TB consistency checks */
46//#define DEBUG_TB_CHECK
98857888 47//#define DEBUG_TLB_CHECK
fd6ce8f6
FB
48
49/* threshold to flush the translated code buffer */
50#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
51
9fa3e853
FB
52#define SMC_BITMAP_USE_THRESHOLD 10
53
54#define MMAP_AREA_START 0x00000000
55#define MMAP_AREA_END 0xa8000000
fd6ce8f6 56
108c49b8
FB
57#if defined(TARGET_SPARC64)
58#define TARGET_PHYS_ADDR_SPACE_BITS 41
59#elif defined(TARGET_PPC64)
60#define TARGET_PHYS_ADDR_SPACE_BITS 42
61#else
62/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63#define TARGET_PHYS_ADDR_SPACE_BITS 32
64#endif
65
fd6ce8f6 66TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
9fa3e853 67TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 68int nb_tbs;
eb51d102
FB
69/* any access to the tbs or the page table must use this lock */
70spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 71
b8076a74 72uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
fd6ce8f6
FB
73uint8_t *code_gen_ptr;
74
9fa3e853
FB
75int phys_ram_size;
76int phys_ram_fd;
77uint8_t *phys_ram_base;
1ccde1cb 78uint8_t *phys_ram_dirty;
9fa3e853 79
6a00d601
FB
80CPUState *first_cpu;
81/* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
83CPUState *cpu_single_env;
84
54936004 85typedef struct PageDesc {
92e873b9 86 /* list of TBs intersecting this ram page */
fd6ce8f6 87 TranslationBlock *first_tb;
9fa3e853
FB
88 /* in order to optimize self modifying code, we count the number
89 of lookups we do to a given page to use a bitmap */
90 unsigned int code_write_count;
91 uint8_t *code_bitmap;
92#if defined(CONFIG_USER_ONLY)
93 unsigned long flags;
94#endif
54936004
FB
95} PageDesc;
96
92e873b9
FB
97typedef struct PhysPageDesc {
98 /* offset in host memory of the page + io_index in the low 12 bits */
e04f40b5 99 uint32_t phys_offset;
92e873b9
FB
100} PhysPageDesc;
101
54936004
FB
102#define L2_BITS 10
103#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
104
105#define L1_SIZE (1 << L1_BITS)
106#define L2_SIZE (1 << L2_BITS)
107
33417e70 108static void io_mem_init(void);
fd6ce8f6 109
83fb7adf
FB
110unsigned long qemu_real_host_page_size;
111unsigned long qemu_host_page_bits;
112unsigned long qemu_host_page_size;
113unsigned long qemu_host_page_mask;
54936004 114
92e873b9 115/* XXX: for system emulation, it could just be an array */
54936004 116static PageDesc *l1_map[L1_SIZE];
0a962c02 117PhysPageDesc **l1_phys_map;
54936004 118
33417e70 119/* io memory support */
33417e70
FB
120CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 122void *io_mem_opaque[IO_MEM_NB_ENTRIES];
33417e70
FB
123static int io_mem_nb;
124
34865134
FB
125/* log support */
126char *logfilename = "/tmp/qemu.log";
127FILE *logfile;
128int loglevel;
129
e3db7226
FB
130/* statistics */
131static int tlb_flush_count;
132static int tb_flush_count;
133static int tb_phys_invalidate_count;
134
b346ff46 135static void page_init(void)
54936004 136{
83fb7adf 137 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 138 TARGET_PAGE_SIZE */
67b915a5 139#ifdef _WIN32
d5a8f07c
FB
140 {
141 SYSTEM_INFO system_info;
142 DWORD old_protect;
143
144 GetSystemInfo(&system_info);
145 qemu_real_host_page_size = system_info.dwPageSize;
146
147 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
148 PAGE_EXECUTE_READWRITE, &old_protect);
149 }
67b915a5 150#else
83fb7adf 151 qemu_real_host_page_size = getpagesize();
d5a8f07c
FB
152 {
153 unsigned long start, end;
154
155 start = (unsigned long)code_gen_buffer;
156 start &= ~(qemu_real_host_page_size - 1);
157
158 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
159 end += qemu_real_host_page_size - 1;
160 end &= ~(qemu_real_host_page_size - 1);
161
162 mprotect((void *)start, end - start,
163 PROT_READ | PROT_WRITE | PROT_EXEC);
164 }
67b915a5 165#endif
d5a8f07c 166
83fb7adf
FB
167 if (qemu_host_page_size == 0)
168 qemu_host_page_size = qemu_real_host_page_size;
169 if (qemu_host_page_size < TARGET_PAGE_SIZE)
170 qemu_host_page_size = TARGET_PAGE_SIZE;
171 qemu_host_page_bits = 0;
172 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
173 qemu_host_page_bits++;
174 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
175 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
176 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
54936004
FB
177}
178
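/* return the PageDesc for a target page index in the two-level l1_map, allocating the second-level table on demand */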
fd6ce8f6 179static inline PageDesc *page_find_alloc(unsigned int index)
54936004 180{
54936004
FB
181 PageDesc **lp, *p;
182
54936004
FB
183 lp = &l1_map[index >> L2_BITS];
184 p = *lp;
185 if (!p) {
186 /* allocate if not found */
59817ccb 187 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 188 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
54936004
FB
189 *lp = p;
190 }
191 return p + (index & (L2_SIZE - 1));
192}
193
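/* same lookup as page_find_alloc(), but never allocates: returns 0 if the page has no descriptor yet */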
fd6ce8f6 194static inline PageDesc *page_find(unsigned int index)
54936004 195{
54936004
FB
196 PageDesc *p;
197
54936004
FB
198 p = l1_map[index >> L2_BITS];
199 if (!p)
200 return 0;
fd6ce8f6
FB
201 return p + (index & (L2_SIZE - 1));
202}
203
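/* look up the PhysPageDesc for a physical page index in l1_phys_map, allocating intermediate tables only when 'alloc' is non zero */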
108c49b8 204static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 205{
108c49b8 206 void **lp, **p;
92e873b9 207
108c49b8
FB
208 p = (void **)l1_phys_map;
209#if TARGET_PHYS_ADDR_SPACE_BITS > 32
210
211#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
212#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
213#endif
214 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
215 p = *lp;
216 if (!p) {
217 /* allocate if not found */
108c49b8
FB
218 if (!alloc)
219 return NULL;
220 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
221 memset(p, 0, sizeof(void *) * L1_SIZE);
222 *lp = p;
223 }
224#endif
225 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
226 p = *lp;
227 if (!p) {
228 /* allocate if not found */
229 if (!alloc)
230 return NULL;
0a962c02 231 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
92e873b9
FB
232 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
233 *lp = p;
234 }
108c49b8 235 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
92e873b9
FB
236}
237
108c49b8 238static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 239{
108c49b8 240 return phys_page_find_alloc(index, 0);
92e873b9
FB
241}
242
9fa3e853 243#if !defined(CONFIG_USER_ONLY)
6a00d601 244static void tlb_protect_code(ram_addr_t ram_addr);
3a7d929e
FB
245static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
246 target_ulong vaddr);
9fa3e853 247#endif
fd6ce8f6 248
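/* register a new virtual CPU: the first call also initializes the code buffer, the page tables and the I/O memory handlers */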
6a00d601 249void cpu_exec_init(CPUState *env)
fd6ce8f6 250{
6a00d601
FB
251 CPUState **penv;
252 int cpu_index;
253
fd6ce8f6
FB
254 if (!code_gen_ptr) {
255 code_gen_ptr = code_gen_buffer;
b346ff46 256 page_init();
33417e70 257 io_mem_init();
fd6ce8f6 258 }
6a00d601
FB
259 env->next_cpu = NULL;
260 penv = &first_cpu;
261 cpu_index = 0;
262 while (*penv != NULL) {
263 penv = (CPUState **)&(*penv)->next_cpu;
264 cpu_index++;
265 }
266 env->cpu_index = cpu_index;
267 *penv = env;
fd6ce8f6
FB
268}
269
9fa3e853
FB
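/* free the self-modifying-code bitmap of a page and reset its write counter */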
270static inline void invalidate_page_bitmap(PageDesc *p)
271{
272 if (p->code_bitmap) {
59817ccb 273 qemu_free(p->code_bitmap);
9fa3e853
FB
274 p->code_bitmap = NULL;
275 }
276 p->code_write_count = 0;
277}
278
fd6ce8f6
FB
279/* set to NULL all the 'first_tb' fields in all PageDescs */
280static void page_flush_tb(void)
281{
282 int i, j;
283 PageDesc *p;
284
285 for(i = 0; i < L1_SIZE; i++) {
286 p = l1_map[i];
287 if (p) {
9fa3e853
FB
288 for(j = 0; j < L2_SIZE; j++) {
289 p->first_tb = NULL;
290 invalidate_page_bitmap(p);
291 p++;
292 }
fd6ce8f6
FB
293 }
294 }
295}
296
297/* flush all the translation blocks */
d4e8164f 298/* XXX: tb_flush is currently not thread safe */
6a00d601 299void tb_flush(CPUState *env1)
fd6ce8f6 300{
6a00d601 301 CPUState *env;
0124311e 302#if defined(DEBUG_FLUSH)
fd6ce8f6
FB
303 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
304 code_gen_ptr - code_gen_buffer,
305 nb_tbs,
0124311e 306 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6
FB
307#endif
308 nb_tbs = 0;
6a00d601
FB
309
310 for(env = first_cpu; env != NULL; env = env->next_cpu) {
311 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
312 }
9fa3e853 313
8a8a608f 314 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 315 page_flush_tb();
9fa3e853 316
fd6ce8f6 317 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
318 /* XXX: flush processor icache at this point if cache flush is
319 expensive */
e3db7226 320 tb_flush_count++;
fd6ce8f6
FB
321}
322
323#ifdef DEBUG_TB_CHECK
324
325static void tb_invalidate_check(unsigned long address)
326{
327 TranslationBlock *tb;
328 int i;
329 address &= TARGET_PAGE_MASK;
330 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
331 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
332 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
333 address >= tb->pc + tb->size)) {
334 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
335 address, tb->pc, tb->size);
336 }
337 }
338 }
339}
340
341/* verify that all the pages have correct rights for code */
342static void tb_page_check(void)
343{
344 TranslationBlock *tb;
345 int i, flags1, flags2;
346
347 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
348 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
349 flags1 = page_get_flags(tb->pc);
350 flags2 = page_get_flags(tb->pc + tb->size - 1);
351 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
352 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
353 tb->pc, tb->size, flags1, flags2);
354 }
355 }
356 }
357}
358
d4e8164f
FB
359void tb_jmp_check(TranslationBlock *tb)
360{
361 TranslationBlock *tb1;
362 unsigned int n1;
363
364 /* suppress any remaining jumps to this TB */
365 tb1 = tb->jmp_first;
366 for(;;) {
367 n1 = (long)tb1 & 3;
368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
369 if (n1 == 2)
370 break;
371 tb1 = tb1->jmp_next[n1];
372 }
373 /* check end of list */
374 if (tb1 != tb) {
375 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
376 }
377}
378
fd6ce8f6
FB
379#endif
380
381/* invalidate one TB */
382static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
383 int next_offset)
384{
385 TranslationBlock *tb1;
386 for(;;) {
387 tb1 = *ptb;
388 if (tb1 == tb) {
389 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
390 break;
391 }
392 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
393 }
394}
395
9fa3e853
FB
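/* remove 'tb' from a page TB list; the low 2 bits of each link encode which of the TB's two pages the link belongs to */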
396static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
397{
398 TranslationBlock *tb1;
399 unsigned int n1;
400
401 for(;;) {
402 tb1 = *ptb;
403 n1 = (long)tb1 & 3;
404 tb1 = (TranslationBlock *)((long)tb1 & ~3);
405 if (tb1 == tb) {
406 *ptb = tb1->page_next[n1];
407 break;
408 }
409 ptb = &tb1->page_next[n1];
410 }
411}
412
d4e8164f
FB
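/* unlink jump slot 'n' of 'tb' from the circular list of TBs jumping to the same destination */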
413static inline void tb_jmp_remove(TranslationBlock *tb, int n)
414{
415 TranslationBlock *tb1, **ptb;
416 unsigned int n1;
417
418 ptb = &tb->jmp_next[n];
419 tb1 = *ptb;
420 if (tb1) {
421 /* find tb(n) in circular list */
422 for(;;) {
423 tb1 = *ptb;
424 n1 = (long)tb1 & 3;
425 tb1 = (TranslationBlock *)((long)tb1 & ~3);
426 if (n1 == n && tb1 == tb)
427 break;
428 if (n1 == 2) {
429 ptb = &tb1->jmp_first;
430 } else {
431 ptb = &tb1->jmp_next[n1];
432 }
433 }
434 /* now we can suppress tb(n) from the list */
435 *ptb = tb->jmp_next[n];
436
437 tb->jmp_next[n] = NULL;
438 }
439}
440
441/* reset the jump entry 'n' of a TB so that it is not chained to
442 another TB */
443static inline void tb_reset_jump(TranslationBlock *tb, int n)
444{
445 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
446}
447
8a40a180 448static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 449{
6a00d601 450 CPUState *env;
8a40a180 451 PageDesc *p;
d4e8164f 452 unsigned int h, n1;
8a40a180
FB
453 target_ulong phys_pc;
454 TranslationBlock *tb1, *tb2;
d4e8164f 455
8a40a180
FB
456 /* remove the TB from the hash list */
457 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
458 h = tb_phys_hash_func(phys_pc);
459 tb_remove(&tb_phys_hash[h], tb,
460 offsetof(TranslationBlock, phys_hash_next));
461
462 /* remove the TB from the page list */
463 if (tb->page_addr[0] != page_addr) {
464 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
469 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
470 tb_page_remove(&p->first_tb, tb);
471 invalidate_page_bitmap(p);
472 }
473
36bdbe54 474 tb_invalidated_flag = 1;
59817ccb 475
fd6ce8f6 476 /* remove the TB from the hash list */
8a40a180 477 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
478 for(env = first_cpu; env != NULL; env = env->next_cpu) {
479 if (env->tb_jmp_cache[h] == tb)
480 env->tb_jmp_cache[h] = NULL;
481 }
d4e8164f
FB
482
483 /* suppress this TB from the two jump lists */
484 tb_jmp_remove(tb, 0);
485 tb_jmp_remove(tb, 1);
486
487 /* suppress any remaining jumps to this TB */
488 tb1 = tb->jmp_first;
489 for(;;) {
490 n1 = (long)tb1 & 3;
491 if (n1 == 2)
492 break;
493 tb1 = (TranslationBlock *)((long)tb1 & ~3);
494 tb2 = tb1->jmp_next[n1];
495 tb_reset_jump(tb1, n1);
496 tb1->jmp_next[n1] = NULL;
497 tb1 = tb2;
498 }
499 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 500
e3db7226 501 tb_phys_invalidate_count++;
9fa3e853
FB
502}
503
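/* set 'len' consecutive bits starting at bit 'start' in the bitmap 'tab' */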
504static inline void set_bits(uint8_t *tab, int start, int len)
505{
506 int end, mask, end1;
507
508 end = start + len;
509 tab += start >> 3;
510 mask = 0xff << (start & 7);
511 if ((start & ~7) == (end & ~7)) {
512 if (start < end) {
513 mask &= ~(0xff << (end & 7));
514 *tab |= mask;
515 }
516 } else {
517 *tab++ |= mask;
518 start = (start + 8) & ~7;
519 end1 = end & ~7;
520 while (start < end1) {
521 *tab++ = 0xff;
522 start += 8;
523 }
524 if (start < end) {
525 mask = ~(0xff << (end & 7));
526 *tab |= mask;
527 }
528 }
529}
530
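/* build a bitmap marking which bytes of the page are covered by translated code */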
531static void build_page_bitmap(PageDesc *p)
532{
533 int n, tb_start, tb_end;
534 TranslationBlock *tb;
535
59817ccb 536 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
537 if (!p->code_bitmap)
538 return;
539 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
540
541 tb = p->first_tb;
542 while (tb != NULL) {
543 n = (long)tb & 3;
544 tb = (TranslationBlock *)((long)tb & ~3);
545 /* NOTE: this is subtle as a TB may span two physical pages */
546 if (n == 0) {
547 /* NOTE: tb_end may be after the end of the page, but
548 it is not a problem */
549 tb_start = tb->pc & ~TARGET_PAGE_MASK;
550 tb_end = tb_start + tb->size;
551 if (tb_end > TARGET_PAGE_SIZE)
552 tb_end = TARGET_PAGE_SIZE;
553 } else {
554 tb_start = 0;
555 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
556 }
557 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
558 tb = tb->page_next[n];
559 }
560}
561
d720b93d
FB
562#ifdef TARGET_HAS_PRECISE_SMC
563
564static void tb_gen_code(CPUState *env,
565 target_ulong pc, target_ulong cs_base, int flags,
566 int cflags)
567{
568 TranslationBlock *tb;
569 uint8_t *tc_ptr;
570 target_ulong phys_pc, phys_page2, virt_page2;
571 int code_gen_size;
572
c27004ec
FB
573 phys_pc = get_phys_addr_code(env, pc);
574 tb = tb_alloc(pc);
d720b93d
FB
575 if (!tb) {
576 /* flush must be done */
577 tb_flush(env);
578 /* cannot fail at this point */
c27004ec 579 tb = tb_alloc(pc);
d720b93d
FB
580 }
581 tc_ptr = code_gen_ptr;
582 tb->tc_ptr = tc_ptr;
583 tb->cs_base = cs_base;
584 tb->flags = flags;
585 tb->cflags = cflags;
586 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
587 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
588
589 /* check next page if needed */
c27004ec 590 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 591 phys_page2 = -1;
c27004ec 592 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
593 phys_page2 = get_phys_addr_code(env, virt_page2);
594 }
595 tb_link_phys(tb, phys_pc, phys_page2);
596}
597#endif
598
9fa3e853
FB
599/* invalidate all TBs which intersect with the target physical page
600 starting in range [start, end[. NOTE: start and end must refer to
d720b93d
FB
601 the same physical page. 'is_cpu_write_access' should be true if called
602 from a real cpu write access: the virtual CPU will exit the current
603 TB if code is modified inside this TB. */
604void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
605 int is_cpu_write_access)
606{
607 int n, current_tb_modified, current_tb_not_found, current_flags;
d720b93d 608 CPUState *env = cpu_single_env;
9fa3e853 609 PageDesc *p;
ea1c1802 610 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
9fa3e853 611 target_ulong tb_start, tb_end;
d720b93d 612 target_ulong current_pc, current_cs_base;
9fa3e853
FB
613
614 p = page_find(start >> TARGET_PAGE_BITS);
615 if (!p)
616 return;
617 if (!p->code_bitmap &&
d720b93d
FB
618 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
619 is_cpu_write_access) {
9fa3e853
FB
620 /* build code bitmap */
621 build_page_bitmap(p);
622 }
623
624 /* we remove all the TBs in the range [start, end[ */
625 /* XXX: see if in some cases it could be faster to invalidate all the code */
d720b93d
FB
626 current_tb_not_found = is_cpu_write_access;
627 current_tb_modified = 0;
628 current_tb = NULL; /* avoid warning */
629 current_pc = 0; /* avoid warning */
630 current_cs_base = 0; /* avoid warning */
631 current_flags = 0; /* avoid warning */
9fa3e853
FB
632 tb = p->first_tb;
633 while (tb != NULL) {
634 n = (long)tb & 3;
635 tb = (TranslationBlock *)((long)tb & ~3);
636 tb_next = tb->page_next[n];
637 /* NOTE: this is subtle as a TB may span two physical pages */
638 if (n == 0) {
639 /* NOTE: tb_end may be after the end of the page, but
640 it is not a problem */
641 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
642 tb_end = tb_start + tb->size;
643 } else {
644 tb_start = tb->page_addr[1];
645 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
646 }
647 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
648#ifdef TARGET_HAS_PRECISE_SMC
649 if (current_tb_not_found) {
650 current_tb_not_found = 0;
651 current_tb = NULL;
652 if (env->mem_write_pc) {
653 /* now we have a real cpu fault */
654 current_tb = tb_find_pc(env->mem_write_pc);
655 }
656 }
657 if (current_tb == tb &&
658 !(current_tb->cflags & CF_SINGLE_INSN)) {
659 /* If we are modifying the current TB, we must stop
660 its execution. We could be more precise by checking
661 that the modification is after the current PC, but it
662 would require a specialized function to partially
663 restore the CPU state */
664
665 current_tb_modified = 1;
666 cpu_restore_state(current_tb, env,
667 env->mem_write_pc, NULL);
668#if defined(TARGET_I386)
669 current_flags = env->hflags;
670 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
671 current_cs_base = (target_ulong)env->segs[R_CS].base;
672 current_pc = current_cs_base + env->eip;
673#else
674#error unsupported CPU
675#endif
676 }
677#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
678 /* we need to do that to handle the case where a signal
679 occurs while doing tb_phys_invalidate() */
680 saved_tb = NULL;
681 if (env) {
682 saved_tb = env->current_tb;
683 env->current_tb = NULL;
684 }
9fa3e853 685 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
686 if (env) {
687 env->current_tb = saved_tb;
688 if (env->interrupt_request && env->current_tb)
689 cpu_interrupt(env, env->interrupt_request);
690 }
9fa3e853
FB
691 }
692 tb = tb_next;
693 }
694#if !defined(CONFIG_USER_ONLY)
695 /* if no code remaining, no need to continue to use slow writes */
696 if (!p->first_tb) {
697 invalidate_page_bitmap(p);
d720b93d
FB
698 if (is_cpu_write_access) {
699 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
700 }
701 }
702#endif
703#ifdef TARGET_HAS_PRECISE_SMC
704 if (current_tb_modified) {
705 /* we generate a block containing just the instruction
706 modifying the memory. It will ensure that it cannot modify
707 itself */
ea1c1802 708 env->current_tb = NULL;
d720b93d
FB
709 tb_gen_code(env, current_pc, current_cs_base, current_flags,
710 CF_SINGLE_INSN);
711 cpu_resume_from_signal(env, NULL);
9fa3e853 712 }
fd6ce8f6 713#endif
9fa3e853 714}
fd6ce8f6 715
9fa3e853 716/* len must be <= 8 and start must be a multiple of len */
d720b93d 717static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
9fa3e853
FB
718{
719 PageDesc *p;
720 int offset, b;
59817ccb 721#if 0
a4193c8a
FB
722 if (1) {
723 if (loglevel) {
724 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
725 cpu_single_env->mem_write_vaddr, len,
726 cpu_single_env->eip,
727 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
728 }
59817ccb
FB
729 }
730#endif
9fa3e853
FB
731 p = page_find(start >> TARGET_PAGE_BITS);
732 if (!p)
733 return;
734 if (p->code_bitmap) {
735 offset = start & ~TARGET_PAGE_MASK;
736 b = p->code_bitmap[offset >> 3] >> (offset & 7);
737 if (b & ((1 << len) - 1))
738 goto do_invalidate;
739 } else {
740 do_invalidate:
d720b93d 741 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
742 }
743}
744
9fa3e853 745#if !defined(CONFIG_SOFTMMU)
d720b93d
FB
746static void tb_invalidate_phys_page(target_ulong addr,
747 unsigned long pc, void *puc)
9fa3e853 748{
d720b93d
FB
749 int n, current_flags, current_tb_modified;
750 target_ulong current_pc, current_cs_base;
9fa3e853 751 PageDesc *p;
d720b93d
FB
752 TranslationBlock *tb, *current_tb;
753#ifdef TARGET_HAS_PRECISE_SMC
754 CPUState *env = cpu_single_env;
755#endif
9fa3e853
FB
756
757 addr &= TARGET_PAGE_MASK;
758 p = page_find(addr >> TARGET_PAGE_BITS);
759 if (!p)
760 return;
761 tb = p->first_tb;
d720b93d
FB
762 current_tb_modified = 0;
763 current_tb = NULL;
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
767#ifdef TARGET_HAS_PRECISE_SMC
768 if (tb && pc != 0) {
769 current_tb = tb_find_pc(pc);
770 }
771#endif
9fa3e853
FB
772 while (tb != NULL) {
773 n = (long)tb & 3;
774 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
775#ifdef TARGET_HAS_PRECISE_SMC
776 if (current_tb == tb &&
777 !(current_tb->cflags & CF_SINGLE_INSN)) {
778 /* If we are modifying the current TB, we must stop
779 its execution. We could be more precise by checking
780 that the modification is after the current PC, but it
781 would require a specialized function to partially
782 restore the CPU state */
783
784 current_tb_modified = 1;
785 cpu_restore_state(current_tb, env, pc, puc);
786#if defined(TARGET_I386)
787 current_flags = env->hflags;
788 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
789 current_cs_base = (target_ulong)env->segs[R_CS].base;
790 current_pc = current_cs_base + env->eip;
791#else
792#error unsupported CPU
793#endif
794 }
795#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
796 tb_phys_invalidate(tb, addr);
797 tb = tb->page_next[n];
798 }
fd6ce8f6 799 p->first_tb = NULL;
d720b93d
FB
800#ifdef TARGET_HAS_PRECISE_SMC
801 if (current_tb_modified) {
802 /* we generate a block containing just the instruction
803 modifying the memory. It will ensure that it cannot modify
804 itself */
ea1c1802 805 env->current_tb = NULL;
d720b93d
FB
806 tb_gen_code(env, current_pc, current_cs_base, current_flags,
807 CF_SINGLE_INSN);
808 cpu_resume_from_signal(env, puc);
809 }
810#endif
fd6ce8f6 811}
9fa3e853 812#endif
fd6ce8f6
FB
813
814/* add the tb in the target page and protect it if necessary */
9fa3e853 815static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 816 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
817{
818 PageDesc *p;
9fa3e853
FB
819 TranslationBlock *last_first_tb;
820
821 tb->page_addr[n] = page_addr;
3a7d929e 822 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
823 tb->page_next[n] = p->first_tb;
824 last_first_tb = p->first_tb;
825 p->first_tb = (TranslationBlock *)((long)tb | n);
826 invalidate_page_bitmap(p);
fd6ce8f6 827
107db443 828#if defined(TARGET_HAS_SMC) || 1
d720b93d 829
9fa3e853 830#if defined(CONFIG_USER_ONLY)
fd6ce8f6 831 if (p->flags & PAGE_WRITE) {
53a5960a
PB
832 target_ulong addr;
833 PageDesc *p2;
9fa3e853
FB
834 int prot;
835
fd6ce8f6
FB
836 /* force the host page as non writable (writes will have a
837 page fault + mprotect overhead) */
53a5960a 838 page_addr &= qemu_host_page_mask;
fd6ce8f6 839 prot = 0;
53a5960a
PB
840 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
841 addr += TARGET_PAGE_SIZE) {
842
843 p2 = page_find (addr >> TARGET_PAGE_BITS);
844 if (!p2)
845 continue;
846 prot |= p2->flags;
847 p2->flags &= ~PAGE_WRITE;
848 page_get_flags(addr);
849 }
850 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
851 (prot & PAGE_BITS) & ~PAGE_WRITE);
852#ifdef DEBUG_TB_INVALIDATE
853 printf("protecting code page: 0x%08lx\n",
53a5960a 854 page_addr);
fd6ce8f6 855#endif
fd6ce8f6 856 }
9fa3e853
FB
857#else
858 /* if some code is already present, then the pages are already
859 protected. So we handle the case where only the first TB is
860 allocated in a physical page */
861 if (!last_first_tb) {
6a00d601 862 tlb_protect_code(page_addr);
9fa3e853
FB
863 }
864#endif
d720b93d
FB
865
866#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
867}
868
869/* Allocate a new translation block. Flush the translation buffer if
870 too many translation blocks or too much generated code. */
c27004ec 871TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
872{
873 TranslationBlock *tb;
fd6ce8f6
FB
874
875 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
876 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 877 return NULL;
fd6ce8f6
FB
878 tb = &tbs[nb_tbs++];
879 tb->pc = pc;
b448f2f3 880 tb->cflags = 0;
d4e8164f
FB
881 return tb;
882}
883
9fa3e853
FB
884/* add a new TB and link it to the physical page tables. phys_page2 is
885 (-1) to indicate that only one page contains the TB. */
886void tb_link_phys(TranslationBlock *tb,
887 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 888{
9fa3e853
FB
889 unsigned int h;
890 TranslationBlock **ptb;
891
892 /* add in the physical hash table */
893 h = tb_phys_hash_func(phys_pc);
894 ptb = &tb_phys_hash[h];
895 tb->phys_hash_next = *ptb;
896 *ptb = tb;
fd6ce8f6
FB
897
898 /* add in the page list */
9fa3e853
FB
899 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
900 if (phys_page2 != -1)
901 tb_alloc_page(tb, 1, phys_page2);
902 else
903 tb->page_addr[1] = -1;
9fa3e853 904
d4e8164f
FB
905 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
906 tb->jmp_next[0] = NULL;
907 tb->jmp_next[1] = NULL;
b448f2f3
FB
908#ifdef USE_CODE_COPY
909 tb->cflags &= ~CF_FP_USED;
910 if (tb->cflags & CF_TB_FP_USED)
911 tb->cflags |= CF_FP_USED;
912#endif
d4e8164f
FB
913
914 /* init original jump addresses */
915 if (tb->tb_next_offset[0] != 0xffff)
916 tb_reset_jump(tb, 0);
917 if (tb->tb_next_offset[1] != 0xffff)
918 tb_reset_jump(tb, 1);
8a40a180
FB
919
920#ifdef DEBUG_TB_CHECK
921 tb_page_check();
922#endif
fd6ce8f6
FB
923}
924
9fa3e853
FB
925/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
926 tb[1].tc_ptr. Return NULL if not found */
927TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 928{
9fa3e853
FB
929 int m_min, m_max, m;
930 unsigned long v;
931 TranslationBlock *tb;
a513fe19
FB
932
933 if (nb_tbs <= 0)
934 return NULL;
935 if (tc_ptr < (unsigned long)code_gen_buffer ||
936 tc_ptr >= (unsigned long)code_gen_ptr)
937 return NULL;
938 /* binary search (cf Knuth) */
939 m_min = 0;
940 m_max = nb_tbs - 1;
941 while (m_min <= m_max) {
942 m = (m_min + m_max) >> 1;
943 tb = &tbs[m];
944 v = (unsigned long)tb->tc_ptr;
945 if (v == tc_ptr)
946 return tb;
947 else if (tc_ptr < v) {
948 m_max = m - 1;
949 } else {
950 m_min = m + 1;
951 }
952 }
953 return &tbs[m_max];
954}
7501267e 955
ea041c0e
FB
956static void tb_reset_jump_recursive(TranslationBlock *tb);
957
958static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
959{
960 TranslationBlock *tb1, *tb_next, **ptb;
961 unsigned int n1;
962
963 tb1 = tb->jmp_next[n];
964 if (tb1 != NULL) {
965 /* find head of list */
966 for(;;) {
967 n1 = (long)tb1 & 3;
968 tb1 = (TranslationBlock *)((long)tb1 & ~3);
969 if (n1 == 2)
970 break;
971 tb1 = tb1->jmp_next[n1];
972 }
973 /* we are now sure that tb jumps to tb1 */
974 tb_next = tb1;
975
976 /* remove tb from the jmp_first list */
977 ptb = &tb_next->jmp_first;
978 for(;;) {
979 tb1 = *ptb;
980 n1 = (long)tb1 & 3;
981 tb1 = (TranslationBlock *)((long)tb1 & ~3);
982 if (n1 == n && tb1 == tb)
983 break;
984 ptb = &tb1->jmp_next[n1];
985 }
986 *ptb = tb->jmp_next[n];
987 tb->jmp_next[n] = NULL;
988
989 /* suppress the jump to next tb in generated code */
990 tb_reset_jump(tb, n);
991
0124311e 992 /* also reset the jumps inside the tb we could have jumped to */
ea041c0e
FB
993 tb_reset_jump_recursive(tb_next);
994 }
995}
996
997static void tb_reset_jump_recursive(TranslationBlock *tb)
998{
999 tb_reset_jump_recursive2(tb, 0);
1000 tb_reset_jump_recursive2(tb, 1);
1001}
1002
1fddef4b 1003#if defined(TARGET_HAS_ICE)
d720b93d
FB
1004static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1005{
1006 target_ulong phys_addr;
1007
1008 phys_addr = cpu_get_phys_page_debug(env, pc);
1009 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1010}
c27004ec 1011#endif
d720b93d 1012
c33a346e
FB
1013/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1014 breakpoint is reached */
2e12669a 1015int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
4c3a88a2 1016{
1fddef4b 1017#if defined(TARGET_HAS_ICE)
4c3a88a2 1018 int i;
d720b93d 1019
4c3a88a2
FB
1020 for(i = 0; i < env->nb_breakpoints; i++) {
1021 if (env->breakpoints[i] == pc)
1022 return 0;
1023 }
1024
1025 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1026 return -1;
1027 env->breakpoints[env->nb_breakpoints++] = pc;
d720b93d
FB
1028
1029 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1030 return 0;
1031#else
1032 return -1;
1033#endif
1034}
1035
1036/* remove a breakpoint */
2e12669a 1037int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
4c3a88a2 1038{
1fddef4b 1039#if defined(TARGET_HAS_ICE)
4c3a88a2
FB
1040 int i;
1041 for(i = 0; i < env->nb_breakpoints; i++) {
1042 if (env->breakpoints[i] == pc)
1043 goto found;
1044 }
1045 return -1;
1046 found:
4c3a88a2 1047 env->nb_breakpoints--;
1fddef4b
FB
1048 if (i < env->nb_breakpoints)
1049 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
d720b93d
FB
1050
1051 breakpoint_invalidate(env, pc);
4c3a88a2
FB
1052 return 0;
1053#else
1054 return -1;
1055#endif
1056}
1057
c33a346e
FB
1058/* enable or disable single step mode. EXCP_DEBUG is returned by the
1059 CPU loop after each instruction */
1060void cpu_single_step(CPUState *env, int enabled)
1061{
1fddef4b 1062#if defined(TARGET_HAS_ICE)
c33a346e
FB
1063 if (env->singlestep_enabled != enabled) {
1064 env->singlestep_enabled = enabled;
1065 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 1066 /* XXX: only flush what is necessary */
0124311e 1067 tb_flush(env);
c33a346e
FB
1068 }
1069#endif
1070}
1071
34865134
FB
1072/* enable or disable low level logging */
1073void cpu_set_log(int log_flags)
1074{
1075 loglevel = log_flags;
1076 if (loglevel && !logfile) {
1077 logfile = fopen(logfilename, "w");
1078 if (!logfile) {
1079 perror(logfilename);
1080 _exit(1);
1081 }
9fa3e853
FB
1082#if !defined(CONFIG_SOFTMMU)
1083 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1084 {
1085 static uint8_t logfile_buf[4096];
1086 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1087 }
1088#else
34865134 1089 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1090#endif
34865134
FB
1091 }
1092}
1093
1094void cpu_set_log_filename(const char *filename)
1095{
1096 logfilename = strdup(filename);
1097}
c33a346e 1098
0124311e 1099/* mask must never be zero, except for A20 change call */
68a79315 1100void cpu_interrupt(CPUState *env, int mask)
ea041c0e
FB
1101{
1102 TranslationBlock *tb;
ee8b7021 1103 static int interrupt_lock;
59817ccb 1104
68a79315 1105 env->interrupt_request |= mask;
ea041c0e
FB
1106 /* if the cpu is currently executing code, we must unlink it and
1107 all the potentially executing TBs */
1108 tb = env->current_tb;
ee8b7021
FB
1109 if (tb && !testandset(&interrupt_lock)) {
1110 env->current_tb = NULL;
ea041c0e 1111 tb_reset_jump_recursive(tb);
ee8b7021 1112 interrupt_lock = 0;
ea041c0e
FB
1113 }
1114}
1115
b54ad049
FB
1116void cpu_reset_interrupt(CPUState *env, int mask)
1117{
1118 env->interrupt_request &= ~mask;
1119}
1120
f193c797
FB
1121CPULogItem cpu_log_items[] = {
1122 { CPU_LOG_TB_OUT_ASM, "out_asm",
1123 "show generated host assembly code for each compiled TB" },
1124 { CPU_LOG_TB_IN_ASM, "in_asm",
1125 "show target assembly code for each compiled TB" },
1126 { CPU_LOG_TB_OP, "op",
1127 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1128#ifdef TARGET_I386
1129 { CPU_LOG_TB_OP_OPT, "op_opt",
1130 "show micro ops after optimization for each compiled TB" },
1131#endif
1132 { CPU_LOG_INT, "int",
1133 "show interrupts/exceptions in short format" },
1134 { CPU_LOG_EXEC, "exec",
1135 "show trace before each executed TB (lots of logs)" },
9fddaa0c
FB
1136 { CPU_LOG_TB_CPU, "cpu",
1137 "show CPU state before bloc translation" },
f193c797
FB
1138#ifdef TARGET_I386
1139 { CPU_LOG_PCALL, "pcall",
1140 "show protected mode far calls/returns/exceptions" },
1141#endif
8e3a9fd2 1142#ifdef DEBUG_IOPORT
fd872598
FB
1143 { CPU_LOG_IOPORT, "ioport",
1144 "show all i/o ports accesses" },
8e3a9fd2 1145#endif
f193c797
FB
1146 { 0, NULL, NULL },
1147};
1148
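/* compare the 'n' character token starting at 's1' with the nul-terminated string 's2' */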
1149static int cmp1(const char *s1, int n, const char *s2)
1150{
1151 if (strlen(s2) != n)
1152 return 0;
1153 return memcmp(s1, s2, n) == 0;
1154}
1155
1156/* takes a comma separated list of log masks. Return 0 if error. */
1157int cpu_str_to_log_mask(const char *str)
1158{
1159 CPULogItem *item;
1160 int mask;
1161 const char *p, *p1;
1162
1163 p = str;
1164 mask = 0;
1165 for(;;) {
1166 p1 = strchr(p, ',');
1167 if (!p1)
1168 p1 = p + strlen(p);
8e3a9fd2
FB
1169 if(cmp1(p,p1-p,"all")) {
1170 for(item = cpu_log_items; item->mask != 0; item++) {
1171 mask |= item->mask;
1172 }
1173 } else {
f193c797
FB
1174 for(item = cpu_log_items; item->mask != 0; item++) {
1175 if (cmp1(p, p1 - p, item->name))
1176 goto found;
1177 }
1178 return 0;
8e3a9fd2 1179 }
f193c797
FB
1180 found:
1181 mask |= item->mask;
1182 if (*p1 != ',')
1183 break;
1184 p = p1 + 1;
1185 }
1186 return mask;
1187}
ea041c0e 1188
7501267e
FB
1189void cpu_abort(CPUState *env, const char *fmt, ...)
1190{
1191 va_list ap;
1192
1193 va_start(ap, fmt);
1194 fprintf(stderr, "qemu: fatal: ");
1195 vfprintf(stderr, fmt, ap);
1196 fprintf(stderr, "\n");
1197#ifdef TARGET_I386
7fe48483
FB
1198 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1199#else
1200 cpu_dump_state(env, stderr, fprintf, 0);
7501267e
FB
1201#endif
1202 va_end(ap);
1203 abort();
1204}
1205
0124311e
FB
1206#if !defined(CONFIG_USER_ONLY)
1207
ee8b7021
FB
1208/* NOTE: if flush_global is true, also flush global entries (not
1209 implemented yet) */
1210void tlb_flush(CPUState *env, int flush_global)
33417e70 1211{
33417e70 1212 int i;
0124311e 1213
9fa3e853
FB
1214#if defined(DEBUG_TLB)
1215 printf("tlb_flush:\n");
1216#endif
0124311e
FB
1217 /* must reset current TB so that interrupts cannot modify the
1218 links while we are modifying them */
1219 env->current_tb = NULL;
1220
33417e70 1221 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1222 env->tlb_table[0][i].addr_read = -1;
1223 env->tlb_table[0][i].addr_write = -1;
1224 env->tlb_table[0][i].addr_code = -1;
1225 env->tlb_table[1][i].addr_read = -1;
1226 env->tlb_table[1][i].addr_write = -1;
1227 env->tlb_table[1][i].addr_code = -1;
33417e70 1228 }
9fa3e853 1229
8a40a180 1230 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853
FB
1231
1232#if !defined(CONFIG_SOFTMMU)
1233 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
0a962c02
FB
1234#endif
1235#ifdef USE_KQEMU
1236 if (env->kqemu_enabled) {
1237 kqemu_flush(env, flush_global);
1238 }
9fa3e853 1239#endif
e3db7226 1240 tlb_flush_count++;
33417e70
FB
1241}
1242
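/* invalidate a single TLB entry if its read, write or code address matches 'addr' */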
274da6b2 1243static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1244{
84b7b8e7
FB
1245 if (addr == (tlb_entry->addr_read &
1246 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1247 addr == (tlb_entry->addr_write &
1248 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1249 addr == (tlb_entry->addr_code &
1250 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1251 tlb_entry->addr_read = -1;
1252 tlb_entry->addr_write = -1;
1253 tlb_entry->addr_code = -1;
1254 }
61382a50
FB
1255}
1256
2e12669a 1257void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1258{
8a40a180 1259 int i;
9fa3e853 1260 TranslationBlock *tb;
0124311e 1261
9fa3e853 1262#if defined(DEBUG_TLB)
108c49b8 1263 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1264#endif
0124311e
FB
1265 /* must reset current TB so that interrupts cannot modify the
1266 links while we are modifying them */
1267 env->current_tb = NULL;
61382a50
FB
1268
1269 addr &= TARGET_PAGE_MASK;
1270 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1271 tlb_flush_entry(&env->tlb_table[0][i], addr);
1272 tlb_flush_entry(&env->tlb_table[1][i], addr);
0124311e 1273
8a40a180
FB
1274 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1275 tb = env->tb_jmp_cache[i];
1276 if (tb &&
1277 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1278 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1279 env->tb_jmp_cache[i] = NULL;
9fa3e853
FB
1280 }
1281 }
1282
0124311e 1283#if !defined(CONFIG_SOFTMMU)
9fa3e853 1284 if (addr < MMAP_AREA_END)
0124311e 1285 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1286#endif
0a962c02
FB
1287#ifdef USE_KQEMU
1288 if (env->kqemu_enabled) {
1289 kqemu_flush_page(env, addr);
1290 }
1291#endif
9fa3e853
FB
1292}
1293
9fa3e853
FB
1294/* update the TLBs so that writes to code in the virtual page 'addr'
1295 can be detected */
6a00d601 1296static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1297{
6a00d601
FB
1298 cpu_physical_memory_reset_dirty(ram_addr,
1299 ram_addr + TARGET_PAGE_SIZE,
1300 CODE_DIRTY_FLAG);
9fa3e853
FB
1301}
1302
9fa3e853 1303/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e
FB
1304 tested for self modifying code */
1305static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1306 target_ulong vaddr)
9fa3e853 1307{
3a7d929e 1308 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1309}
1310
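/* if the entry maps RAM whose host address falls in [start, start + length), redirect writes to IO_MEM_NOTDIRTY so that they are trapped again */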
1311static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1312 unsigned long start, unsigned long length)
1313{
1314 unsigned long addr;
84b7b8e7
FB
1315 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1316 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1317 if ((addr - start) < length) {
84b7b8e7 1318 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1ccde1cb
FB
1319 }
1320 }
1321}
1322
3a7d929e 1323void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1324 int dirty_flags)
1ccde1cb
FB
1325{
1326 CPUState *env;
4f2ac237 1327 unsigned long length, start1;
0a962c02
FB
1328 int i, mask, len;
1329 uint8_t *p;
1ccde1cb
FB
1330
1331 start &= TARGET_PAGE_MASK;
1332 end = TARGET_PAGE_ALIGN(end);
1333
1334 length = end - start;
1335 if (length == 0)
1336 return;
0a962c02 1337 len = length >> TARGET_PAGE_BITS;
3a7d929e 1338#ifdef USE_KQEMU
6a00d601
FB
1339 /* XXX: should not depend on cpu context */
1340 env = first_cpu;
3a7d929e 1341 if (env->kqemu_enabled) {
f23db169
FB
1342 ram_addr_t addr;
1343 addr = start;
1344 for(i = 0; i < len; i++) {
1345 kqemu_set_notdirty(env, addr);
1346 addr += TARGET_PAGE_SIZE;
1347 }
3a7d929e
FB
1348 }
1349#endif
f23db169
FB
1350 mask = ~dirty_flags;
1351 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1352 for(i = 0; i < len; i++)
1353 p[i] &= mask;
1354
1ccde1cb
FB
1355 /* we modify the TLB cache so that the dirty bit will be set again
1356 when accessing the range */
59817ccb 1357 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1358 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1359 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1360 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1361 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1362 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6a00d601 1363 }
59817ccb
FB
1364
1365#if !defined(CONFIG_SOFTMMU)
1366 /* XXX: this is expensive */
1367 {
1368 VirtPageDesc *p;
1369 int j;
1370 target_ulong addr;
1371
1372 for(i = 0; i < L1_SIZE; i++) {
1373 p = l1_virt_map[i];
1374 if (p) {
1375 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1376 for(j = 0; j < L2_SIZE; j++) {
1377 if (p->valid_tag == virt_valid_tag &&
1378 p->phys_addr >= start && p->phys_addr < end &&
1379 (p->prot & PROT_WRITE)) {
1380 if (addr < MMAP_AREA_END) {
1381 mprotect((void *)addr, TARGET_PAGE_SIZE,
1382 p->prot & ~PROT_WRITE);
1383 }
1384 }
1385 addr += TARGET_PAGE_SIZE;
1386 p++;
1387 }
1388 }
1389 }
1390 }
1391#endif
1ccde1cb
FB
1392}
1393
3a7d929e
FB
1394static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1395{
1396 ram_addr_t ram_addr;
1397
84b7b8e7
FB
1398 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1399 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1400 tlb_entry->addend - (unsigned long)phys_ram_base;
1401 if (!cpu_physical_memory_is_dirty(ram_addr)) {
84b7b8e7 1402 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
3a7d929e
FB
1403 }
1404 }
1405}
1406
1407/* update the TLB according to the current state of the dirty bits */
1408void cpu_tlb_update_dirty(CPUState *env)
1409{
1410 int i;
1411 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1412 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1413 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1414 tlb_update_dirty(&env->tlb_table[1][i]);
3a7d929e
FB
1415}
1416
1ccde1cb 1417static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
108c49b8 1418 unsigned long start)
1ccde1cb
FB
1419{
1420 unsigned long addr;
84b7b8e7
FB
1421 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1422 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1423 if (addr == start) {
84b7b8e7 1424 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1ccde1cb
FB
1425 }
1426 }
1427}
1428
1429/* update the TLB corresponding to virtual page vaddr and phys addr
1430 addr so that it is no longer dirty */
6a00d601
FB
1431static inline void tlb_set_dirty(CPUState *env,
1432 unsigned long addr, target_ulong vaddr)
1ccde1cb 1433{
1ccde1cb
FB
1434 int i;
1435
1ccde1cb
FB
1436 addr &= TARGET_PAGE_MASK;
1437 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1438 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1439 tlb_set_dirty1(&env->tlb_table[1][i], addr);
9fa3e853
FB
1440}
1441
59817ccb
FB
1442/* add a new TLB entry. At most one entry for a given virtual address
1443 is permitted. Return 0 if OK or 2 if the page could not be mapped
1444 (can only happen in non SOFTMMU mode for I/O pages or pages
1445 conflicting with the host address space). */
84b7b8e7
FB
1446int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1447 target_phys_addr_t paddr, int prot,
1448 int is_user, int is_softmmu)
9fa3e853 1449{
92e873b9 1450 PhysPageDesc *p;
4f2ac237 1451 unsigned long pd;
9fa3e853 1452 unsigned int index;
4f2ac237 1453 target_ulong address;
108c49b8 1454 target_phys_addr_t addend;
9fa3e853 1455 int ret;
84b7b8e7 1456 CPUTLBEntry *te;
9fa3e853 1457
92e873b9 1458 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1459 if (!p) {
1460 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1461 } else {
1462 pd = p->phys_offset;
9fa3e853
FB
1463 }
1464#if defined(DEBUG_TLB)
3a7d929e 1465 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
84b7b8e7 1466 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
9fa3e853
FB
1467#endif
1468
1469 ret = 0;
1470#if !defined(CONFIG_SOFTMMU)
1471 if (is_softmmu)
1472#endif
1473 {
1474 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1475 /* IO memory case */
1476 address = vaddr | pd;
1477 addend = paddr;
1478 } else {
1479 /* standard memory */
1480 address = vaddr;
1481 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1482 }
1483
90f18422 1484 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
9fa3e853 1485 addend -= vaddr;
84b7b8e7
FB
1486 te = &env->tlb_table[is_user][index];
1487 te->addend = addend;
67b915a5 1488 if (prot & PAGE_READ) {
84b7b8e7
FB
1489 te->addr_read = address;
1490 } else {
1491 te->addr_read = -1;
1492 }
1493 if (prot & PAGE_EXEC) {
1494 te->addr_code = address;
9fa3e853 1495 } else {
84b7b8e7 1496 te->addr_code = -1;
9fa3e853 1497 }
67b915a5 1498 if (prot & PAGE_WRITE) {
9fa3e853
FB
1499 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1500 /* ROM: access is ignored (same as unassigned) */
84b7b8e7 1501 te->addr_write = vaddr | IO_MEM_ROM;
3a7d929e 1502 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1ccde1cb 1503 !cpu_physical_memory_is_dirty(pd)) {
84b7b8e7 1504 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
9fa3e853 1505 } else {
84b7b8e7 1506 te->addr_write = address;
9fa3e853
FB
1507 }
1508 } else {
84b7b8e7 1509 te->addr_write = -1;
9fa3e853
FB
1510 }
1511 }
1512#if !defined(CONFIG_SOFTMMU)
1513 else {
1514 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1515 /* IO access: no mapping is done as it will be handled by the
1516 soft MMU */
1517 if (!(env->hflags & HF_SOFTMMU_MASK))
1518 ret = 2;
1519 } else {
1520 void *map_addr;
59817ccb
FB
1521
1522 if (vaddr >= MMAP_AREA_END) {
1523 ret = 2;
1524 } else {
1525 if (prot & PROT_WRITE) {
1526 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
d720b93d 1527#if defined(TARGET_HAS_SMC) || 1
59817ccb 1528 first_tb ||
d720b93d 1529#endif
59817ccb
FB
1530 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1531 !cpu_physical_memory_is_dirty(pd))) {
1532 /* ROM: we do as if code was inside */
1533 /* if code is present, we only map as read only and save the
1534 original mapping */
1535 VirtPageDesc *vp;
1536
90f18422 1537 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
59817ccb
FB
1538 vp->phys_addr = pd;
1539 vp->prot = prot;
1540 vp->valid_tag = virt_valid_tag;
1541 prot &= ~PAGE_WRITE;
1542 }
1543 }
1544 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1545 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1546 if (map_addr == MAP_FAILED) {
1547 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1548 paddr, vaddr);
9fa3e853 1549 }
9fa3e853
FB
1550 }
1551 }
1552 }
1553#endif
1554 return ret;
1555}
1556
1557/* called from signal handler: invalidate the code and unprotect the
1558 page. Return TRUE if the fault was successfully handled. */
53a5960a 1559int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
9fa3e853
FB
1560{
1561#if !defined(CONFIG_SOFTMMU)
1562 VirtPageDesc *vp;
1563
1564#if defined(DEBUG_TLB)
1565 printf("page_unprotect: addr=0x%08x\n", addr);
1566#endif
1567 addr &= TARGET_PAGE_MASK;
59817ccb
FB
1568
1569 /* if it is not mapped, no need to worry here */
1570 if (addr >= MMAP_AREA_END)
1571 return 0;
9fa3e853
FB
1572 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1573 if (!vp)
1574 return 0;
1575 /* NOTE: in this case, valid_tag is _not_ tested as it
1576 validates only the code TLB */
1577 if (vp->valid_tag != virt_valid_tag)
1578 return 0;
1579 if (!(vp->prot & PAGE_WRITE))
1580 return 0;
1581#if defined(DEBUG_TLB)
1582 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1583 addr, vp->phys_addr, vp->prot);
1584#endif
59817ccb
FB
1585 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1586 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1587 (unsigned long)addr, vp->prot);
d720b93d 1588 /* set the dirty bit */
0a962c02 1589 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
d720b93d
FB
1590 /* flush the code inside */
1591 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
9fa3e853
FB
1592 return 1;
1593#else
1594 return 0;
1595#endif
33417e70
FB
1596}
1597
0124311e
FB
1598#else
1599
ee8b7021 1600void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
1601{
1602}
1603
2e12669a 1604void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
1605{
1606}
1607
84b7b8e7
FB
1608int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1609 target_phys_addr_t paddr, int prot,
1610 int is_user, int is_softmmu)
9fa3e853
FB
1611{
1612 return 0;
1613}
0124311e 1614
9fa3e853
FB
1615/* dump memory mappings */
1616void page_dump(FILE *f)
33417e70 1617{
9fa3e853
FB
1618 unsigned long start, end;
1619 int i, j, prot, prot1;
1620 PageDesc *p;
33417e70 1621
9fa3e853
FB
1622 fprintf(f, "%-8s %-8s %-8s %s\n",
1623 "start", "end", "size", "prot");
1624 start = -1;
1625 end = -1;
1626 prot = 0;
1627 for(i = 0; i <= L1_SIZE; i++) {
1628 if (i < L1_SIZE)
1629 p = l1_map[i];
1630 else
1631 p = NULL;
1632 for(j = 0;j < L2_SIZE; j++) {
1633 if (!p)
1634 prot1 = 0;
1635 else
1636 prot1 = p[j].flags;
1637 if (prot1 != prot) {
1638 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1639 if (start != -1) {
1640 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1641 start, end, end - start,
1642 prot & PAGE_READ ? 'r' : '-',
1643 prot & PAGE_WRITE ? 'w' : '-',
1644 prot & PAGE_EXEC ? 'x' : '-');
1645 }
1646 if (prot1 != 0)
1647 start = end;
1648 else
1649 start = -1;
1650 prot = prot1;
1651 }
1652 if (!p)
1653 break;
1654 }
33417e70 1655 }
33417e70
FB
1656}
1657
53a5960a 1658int page_get_flags(target_ulong address)
33417e70 1659{
9fa3e853
FB
1660 PageDesc *p;
1661
1662 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1663 if (!p)
9fa3e853
FB
1664 return 0;
1665 return p->flags;
1666}
1667
1668/* modify the flags of a page and invalidate the code if
1669 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1670 depending on PAGE_WRITE */
53a5960a 1671void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
1672{
1673 PageDesc *p;
53a5960a 1674 target_ulong addr;
9fa3e853
FB
1675
1676 start = start & TARGET_PAGE_MASK;
1677 end = TARGET_PAGE_ALIGN(end);
1678 if (flags & PAGE_WRITE)
1679 flags |= PAGE_WRITE_ORG;
1680 spin_lock(&tb_lock);
1681 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1682 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1683 /* if the write protection is set, then we invalidate the code
1684 inside */
1685 if (!(p->flags & PAGE_WRITE) &&
1686 (flags & PAGE_WRITE) &&
1687 p->first_tb) {
d720b93d 1688 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
1689 }
1690 p->flags = flags;
1691 }
1692 spin_unlock(&tb_lock);
33417e70
FB
1693}
1694
9fa3e853
FB
1695/* called from signal handler: invalidate the code and unprotect the
1696 page. Return TRUE if the fault was successfully handled. */
53a5960a 1697int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
1698{
1699 unsigned int page_index, prot, pindex;
1700 PageDesc *p, *p1;
53a5960a 1701 target_ulong host_start, host_end, addr;
9fa3e853 1702
83fb7adf 1703 host_start = address & qemu_host_page_mask;
9fa3e853
FB
1704 page_index = host_start >> TARGET_PAGE_BITS;
1705 p1 = page_find(page_index);
1706 if (!p1)
1707 return 0;
83fb7adf 1708 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
1709 p = p1;
1710 prot = 0;
1711 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1712 prot |= p->flags;
1713 p++;
1714 }
1715 /* if the page was really writable, then we change its
1716 protection back to writable */
1717 if (prot & PAGE_WRITE_ORG) {
1718 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1719 if (!(p1[pindex].flags & PAGE_WRITE)) {
53a5960a 1720 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
1721 (prot & PAGE_BITS) | PAGE_WRITE);
1722 p1[pindex].flags |= PAGE_WRITE;
1723 /* and since the content will be modified, we must invalidate
1724 the corresponding translated code. */
d720b93d 1725 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
1726#ifdef DEBUG_TB_CHECK
1727 tb_invalidate_check(address);
1728#endif
1729 return 1;
1730 }
1731 }
1732 return 0;
1733}
1734
1735/* call this function when system calls directly modify a memory area */
53a5960a
PB
1736/* ??? This should be redundant now we have lock_user. */
1737void page_unprotect_range(target_ulong data, target_ulong data_size)
9fa3e853 1738{
53a5960a 1739 target_ulong start, end, addr;
9fa3e853 1740
53a5960a 1741 start = data;
9fa3e853
FB
1742 end = start + data_size;
1743 start &= TARGET_PAGE_MASK;
1744 end = TARGET_PAGE_ALIGN(end);
1745 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
d720b93d 1746 page_unprotect(addr, 0, NULL);
9fa3e853
FB
1747 }
1748}
1749
6a00d601
FB
1750static inline void tlb_set_dirty(CPUState *env,
1751 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
1752{
1753}
9fa3e853
FB
1754#endif /* defined(CONFIG_USER_ONLY) */
1755
33417e70
FB
1756/* register physical memory. 'size' must be a multiple of the target
1757 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1758 io memory page */
2e12669a
FB
1759void cpu_register_physical_memory(target_phys_addr_t start_addr,
1760 unsigned long size,
1761 unsigned long phys_offset)
33417e70 1762{
108c49b8 1763 target_phys_addr_t addr, end_addr;
92e873b9 1764 PhysPageDesc *p;
33417e70 1765
5fd386f6 1766 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
33417e70 1767 end_addr = start_addr + size;
5fd386f6 1768 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
108c49b8 1769 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
9fa3e853
FB
1770 p->phys_offset = phys_offset;
1771 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
33417e70
FB
1772 phys_offset += TARGET_PAGE_SIZE;
1773 }
1774}
1775
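/* unassigned memory: reads return 0 and writes are ignored */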
a4193c8a 1776static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70
FB
1777{
1778 return 0;
1779}
1780
a4193c8a 1781static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70
FB
1782{
1783}
1784
1785static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1786 unassigned_mem_readb,
1787 unassigned_mem_readb,
1788 unassigned_mem_readb,
1789};
1790
1791static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1792 unassigned_mem_writeb,
1793 unassigned_mem_writeb,
1794 unassigned_mem_writeb,
1795};
1796
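/* write handlers for RAM pages still containing translated code: invalidate the TBs on the page, perform the store, update the dirty flags and re-enable direct writes once the page is fully dirty */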
3a7d929e 1797static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1798{
3a7d929e
FB
1799 unsigned long ram_addr;
1800 int dirty_flags;
1801 ram_addr = addr - (unsigned long)phys_ram_base;
1802 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1803 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1804#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1805 tb_invalidate_phys_page_fast(ram_addr, 1);
1806 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1807#endif
3a7d929e 1808 }
c27004ec 1809 stb_p((uint8_t *)(long)addr, val);
f32fc648
FB
1810#ifdef USE_KQEMU
1811 if (cpu_single_env->kqemu_enabled &&
1812 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1813 kqemu_modify_page(cpu_single_env, ram_addr);
1814#endif
f23db169
FB
1815 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1816 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1817 /* we remove the notdirty callback only if the code has been
1818 flushed */
1819 if (dirty_flags == 0xff)
6a00d601 1820 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1821}
1822
3a7d929e 1823static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1824{
3a7d929e
FB
1825 unsigned long ram_addr;
1826 int dirty_flags;
1827 ram_addr = addr - (unsigned long)phys_ram_base;
1828 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1829 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1830#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1831 tb_invalidate_phys_page_fast(ram_addr, 2);
1832 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1833#endif
3a7d929e 1834 }
c27004ec 1835 stw_p((uint8_t *)(long)addr, val);
f32fc648
FB
1836#ifdef USE_KQEMU
1837 if (cpu_single_env->kqemu_enabled &&
1838 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1839 kqemu_modify_page(cpu_single_env, ram_addr);
1840#endif
f23db169
FB
1841 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1842 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1843 /* we remove the notdirty callback only if the code has been
1844 flushed */
1845 if (dirty_flags == 0xff)
6a00d601 1846 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1847}
1848
3a7d929e 1849static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1850{
3a7d929e
FB
1851 unsigned long ram_addr;
1852 int dirty_flags;
1853 ram_addr = addr - (unsigned long)phys_ram_base;
1854 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1855 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1856#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1857 tb_invalidate_phys_page_fast(ram_addr, 4);
1858 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1859#endif
3a7d929e 1860 }
c27004ec 1861 stl_p((uint8_t *)(long)addr, val);
f32fc648
FB
1862#ifdef USE_KQEMU
1863 if (cpu_single_env->kqemu_enabled &&
1864 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1865 kqemu_modify_page(cpu_single_env, ram_addr);
1866#endif
f23db169
FB
1867 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1868 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1869 /* we remove the notdirty callback only if the code has been
1870 flushed */
1871 if (dirty_flags == 0xff)
6a00d601 1872 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1873}
1874
3a7d929e 1875static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
1876 NULL, /* never used */
1877 NULL, /* never used */
1878 NULL, /* never used */
1879};
1880
1ccde1cb
FB
1881static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1882 notdirty_mem_writeb,
1883 notdirty_mem_writew,
1884 notdirty_mem_writel,
1885};
1886
33417e70
FB
1887static void io_mem_init(void)
1888{
3a7d929e 1889 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 1890 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 1891 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
1892 io_mem_nb = 5;
1893
1894 /* alloc dirty bits array */
0a962c02 1895 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 1896 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1897}
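/* Illustrative sketch (not part of the original file): the dirty-tracking
   convention used above.  phys_ram_dirty holds one byte of flags per RAM
   page and io_mem_init() starts with everything fully dirty (0xff).  A page
   whose byte is exactly 0xff needs no notdirty write callback; if
   CODE_DIRTY_FLAG is clear, a write must first invalidate any translated
   code on that page.  Presumably the cpu_physical_memory_is_dirty() check
   used below boils down to the same test: */
static inline int example_page_is_fully_dirty(unsigned long ram_addr)
{
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] == 0xff;
}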
1898
1899/* mem_read and mem_write are arrays of functions containing the
1900 access functions for byte (index 0), word (index 1) and dword (index
1901 2) accesses. All functions must be supplied. If io_index is non-zero,
1902 the corresponding I/O zone is modified. If it is zero, a new I/O zone
1903 is allocated. The return value can be used with
1904 cpu_register_physical_memory(). (-1) is returned on error. */
1905int cpu_register_io_memory(int io_index,
1906 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
1907 CPUWriteMemoryFunc **mem_write,
1908 void *opaque)
33417e70
FB
1909{
1910 int i;
1911
1912 if (io_index <= 0) {
b5ff1b31 1913 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
33417e70
FB
1914 return -1;
1915 io_index = io_mem_nb++;
1916 } else {
1917 if (io_index >= IO_MEM_NB_ENTRIES)
1918 return -1;
1919 }
b5ff1b31 1920
33417e70
FB
1921 for(i = 0;i < 3; i++) {
1922 io_mem_read[io_index][i] = mem_read[i];
1923 io_mem_write[io_index][i] = mem_write[i];
1924 }
a4193c8a 1925 io_mem_opaque[io_index] = opaque;
33417e70
FB
1926 return io_index << IO_MEM_SHIFT;
1927}
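/* Illustrative sketch (not part of the original file): registering a
   hypothetical memory-mapped device.  Every name prefixed with "example_"
   is invented for illustration.  The value returned by
   cpu_register_io_memory() already encodes the io_index in the format
   expected by cpu_register_physical_memory(). */
static uint32_t example_dev_read(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode addr and return the register value */
    return 0;
}

static void example_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would decode addr and update its state */
}

static CPUReadMemoryFunc *example_dev_read_fn[3] = {
    example_dev_read,   /* byte */
    example_dev_read,   /* word */
    example_dev_read,   /* dword */
};

static CPUWriteMemoryFunc *example_dev_write_fn[3] = {
    example_dev_write,
    example_dev_write,
    example_dev_write,
};

static void example_dev_init(target_phys_addr_t base, void *dev_state)
{
    int iomemtype;

    /* io_index == 0 asks for a new I/O zone */
    iomemtype = cpu_register_io_memory(0, example_dev_read_fn,
                                       example_dev_write_fn, dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}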
61382a50 1928
8926b517
FB
1929CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1930{
1931 return io_mem_write[io_index >> IO_MEM_SHIFT];
1932}
1933
1934CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1935{
1936 return io_mem_read[io_index >> IO_MEM_SHIFT];
1937}
1938
13eb76e0
FB
1939/* physical memory access (slow version, mainly for debug) */
1940#if defined(CONFIG_USER_ONLY)
2e12669a 1941void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1942 int len, int is_write)
1943{
1944 int l, flags;
1945 target_ulong page;
53a5960a 1946 void * p;
13eb76e0
FB
1947
1948 while (len > 0) {
1949 page = addr & TARGET_PAGE_MASK;
1950 l = (page + TARGET_PAGE_SIZE) - addr;
1951 if (l > len)
1952 l = len;
1953 flags = page_get_flags(page);
1954 if (!(flags & PAGE_VALID))
1955 return;
1956 if (is_write) {
1957 if (!(flags & PAGE_WRITE))
1958 return;
53a5960a
PB
1959 p = lock_user(addr, len, 0);
1960 memcpy(p, buf, len);
1961 unlock_user(p, addr, len);
13eb76e0
FB
1962 } else {
1963 if (!(flags & PAGE_READ))
1964 return;
53a5960a
PB
1965 p = lock_user(addr, len, 1);
1966 memcpy(buf, p, len);
1967 unlock_user(p, addr, 0);
13eb76e0
FB
1968 }
1969 len -= l;
1970 buf += l;
1971 addr += l;
1972 }
1973}
8df1cd07 1974
13eb76e0 1975#else
2e12669a 1976void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1977 int len, int is_write)
1978{
1979 int l, io_index;
1980 uint8_t *ptr;
1981 uint32_t val;
2e12669a
FB
1982 target_phys_addr_t page;
1983 unsigned long pd;
92e873b9 1984 PhysPageDesc *p;
13eb76e0
FB
1985
1986 while (len > 0) {
1987 page = addr & TARGET_PAGE_MASK;
1988 l = (page + TARGET_PAGE_SIZE) - addr;
1989 if (l > len)
1990 l = len;
92e873b9 1991 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
1992 if (!p) {
1993 pd = IO_MEM_UNASSIGNED;
1994 } else {
1995 pd = p->phys_offset;
1996 }
1997
1998 if (is_write) {
3a7d929e 1999 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 2000 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
6a00d601
FB
2001 /* XXX: could force cpu_single_env to NULL to avoid
2002 potential bugs */
13eb76e0 2003 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 2004 /* 32 bit write access */
c27004ec 2005 val = ldl_p(buf);
a4193c8a 2006 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2007 l = 4;
2008 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 2009 /* 16 bit write access */
c27004ec 2010 val = lduw_p(buf);
a4193c8a 2011 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2012 l = 2;
2013 } else {
1c213d19 2014 /* 8 bit write access */
c27004ec 2015 val = ldub_p(buf);
a4193c8a 2016 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
2017 l = 1;
2018 }
2019 } else {
b448f2f3
FB
2020 unsigned long addr1;
2021 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2022 /* RAM case */
b448f2f3 2023 ptr = phys_ram_base + addr1;
13eb76e0 2024 memcpy(ptr, buf, l);
3a7d929e
FB
2025 if (!cpu_physical_memory_is_dirty(addr1)) {
2026 /* invalidate code */
2027 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2028 /* set dirty bit */
f23db169
FB
2029 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2030 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2031 }
13eb76e0
FB
2032 }
2033 } else {
3a7d929e 2034 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
13eb76e0
FB
2035 /* I/O case */
2036 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2037 if (l >= 4 && ((addr & 3) == 0)) {
2038 /* 32 bit read access */
a4193c8a 2039 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2040 stl_p(buf, val);
13eb76e0
FB
2041 l = 4;
2042 } else if (l >= 2 && ((addr & 1) == 0)) {
2043 /* 16 bit read access */
a4193c8a 2044 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2045 stw_p(buf, val);
13eb76e0
FB
2046 l = 2;
2047 } else {
1c213d19 2048 /* 8 bit read access */
a4193c8a 2049 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2050 stb_p(buf, val);
13eb76e0
FB
2051 l = 1;
2052 }
2053 } else {
2054 /* RAM case */
2055 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2056 (addr & ~TARGET_PAGE_MASK);
2057 memcpy(buf, ptr, l);
2058 }
2059 }
2060 len -= l;
2061 buf += l;
2062 addr += l;
2063 }
2064}
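/* Illustrative sketch (not part of the original file): device emulation
   typically goes through this slow path for DMA-style accesses to guest
   physical memory; the access is routed either to RAM (with code
   invalidation and dirty marking) or to the registered I/O callbacks.
   The helper name and descriptor layout below are hypothetical. */
static uint32_t example_read_descriptor_word(target_phys_addr_t desc_addr)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(desc_addr, buf, 4, 0);  /* is_write = 0: read */
    return ldl_p(buf);  /* decode a 32-bit value, as the I/O path above does */
}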
8df1cd07
FB
2065
2066/* warning: addr must be aligned */
2067uint32_t ldl_phys(target_phys_addr_t addr)
2068{
2069 int io_index;
2070 uint8_t *ptr;
2071 uint32_t val;
2072 unsigned long pd;
2073 PhysPageDesc *p;
2074
2075 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2076 if (!p) {
2077 pd = IO_MEM_UNASSIGNED;
2078 } else {
2079 pd = p->phys_offset;
2080 }
2081
3a7d929e 2082 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
8df1cd07
FB
2083 /* I/O case */
2084 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2085 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2086 } else {
2087 /* RAM case */
2088 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2089 (addr & ~TARGET_PAGE_MASK);
2090 val = ldl_p(ptr);
2091 }
2092 return val;
2093}
2094
84b7b8e7
FB
2095/* warning: addr must be aligned */
2096uint64_t ldq_phys(target_phys_addr_t addr)
2097{
2098 int io_index;
2099 uint8_t *ptr;
2100 uint64_t val;
2101 unsigned long pd;
2102 PhysPageDesc *p;
2103
2104 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2105 if (!p) {
2106 pd = IO_MEM_UNASSIGNED;
2107 } else {
2108 pd = p->phys_offset;
2109 }
2110
2111 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2112 /* I/O case */
2113 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2114#ifdef TARGET_WORDS_BIGENDIAN
2115 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2116 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2117#else
2118 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2119 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2120#endif
2121 } else {
2122 /* RAM case */
2123 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2124 (addr & ~TARGET_PAGE_MASK);
2125 val = ldq_p(ptr);
2126 }
2127 return val;
2128}
2129
aab33094
FB
2130/* XXX: optimize */
2131uint32_t ldub_phys(target_phys_addr_t addr)
2132{
2133 uint8_t val;
2134 cpu_physical_memory_read(addr, &val, 1);
2135 return val;
2136}
2137
2138/* XXX: optimize */
2139uint32_t lduw_phys(target_phys_addr_t addr)
2140{
2141 uint16_t val;
2142 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2143 return tswap16(val);
2144}
2145
8df1cd07
FB
2146/* warning: addr must be aligned. The RAM page is not marked as dirty
2147 and the code inside is not invalidated. This is useful if the dirty
2148 bits are used to track modified PTEs */
2149void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2150{
2151 int io_index;
2152 uint8_t *ptr;
2153 unsigned long pd;
2154 PhysPageDesc *p;
2155
2156 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2157 if (!p) {
2158 pd = IO_MEM_UNASSIGNED;
2159 } else {
2160 pd = p->phys_offset;
2161 }
2162
3a7d929e 2163 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2164 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2165 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2166 } else {
2167 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2168 (addr & ~TARGET_PAGE_MASK);
2169 stl_p(ptr, val);
2170 }
2171}
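/* Illustrative sketch (not part of the original file): how a target MMU
   helper might use stl_phys_notdirty() to set accessed/dirty bits in a
   hypothetical 32-bit page table entry without marking the RAM page dirty
   or invalidating translated code.  EXAMPLE_PTE_ACCESSED and
   EXAMPLE_PTE_DIRTY are invented bit masks. */
#define EXAMPLE_PTE_ACCESSED 0x20
#define EXAMPLE_PTE_DIRTY    0x40

static void example_update_pte_flags(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    pte |= EXAMPLE_PTE_ACCESSED;
    if (is_write)
        pte |= EXAMPLE_PTE_DIRTY;
    stl_phys_notdirty(pte_addr, pte);
}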
2172
2173/* warning: addr must be aligned */
8df1cd07
FB
2174void stl_phys(target_phys_addr_t addr, uint32_t val)
2175{
2176 int io_index;
2177 uint8_t *ptr;
2178 unsigned long pd;
2179 PhysPageDesc *p;
2180
2181 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2182 if (!p) {
2183 pd = IO_MEM_UNASSIGNED;
2184 } else {
2185 pd = p->phys_offset;
2186 }
2187
3a7d929e 2188 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2189 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2190 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2191 } else {
2192 unsigned long addr1;
2193 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2194 /* RAM case */
2195 ptr = phys_ram_base + addr1;
2196 stl_p(ptr, val);
3a7d929e
FB
2197 if (!cpu_physical_memory_is_dirty(addr1)) {
2198 /* invalidate code */
2199 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2200 /* set dirty bit */
f23db169
FB
2201 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2202 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2203 }
8df1cd07
FB
2204 }
2205}
2206
aab33094
FB
2207/* XXX: optimize */
2208void stb_phys(target_phys_addr_t addr, uint32_t val)
2209{
2210 uint8_t v = val;
2211 cpu_physical_memory_write(addr, &v, 1);
2212}
2213
2214/* XXX: optimize */
2215void stw_phys(target_phys_addr_t addr, uint32_t val)
2216{
2217 uint16_t v = tswap16(val);
2218 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2219}
2220
2221/* XXX: optimize */
2222void stq_phys(target_phys_addr_t addr, uint64_t val)
2223{
2224 val = tswap64(val);
2225 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2226}
2227
13eb76e0
FB
2228#endif
2229
2230/* virtual memory access for debug */
b448f2f3
FB
2231int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2232 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2233{
2234 int l;
2235 target_ulong page, phys_addr;
2236
2237 while (len > 0) {
2238 page = addr & TARGET_PAGE_MASK;
2239 phys_addr = cpu_get_phys_page_debug(env, page);
2240 /* if no physical page mapped, return an error */
2241 if (phys_addr == -1)
2242 return -1;
2243 l = (page + TARGET_PAGE_SIZE) - addr;
2244 if (l > len)
2245 l = len;
b448f2f3
FB
2246 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2247 buf, l, is_write);
13eb76e0
FB
2248 len -= l;
2249 buf += l;
2250 addr += l;
2251 }
2252 return 0;
2253}
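/* Illustrative sketch (not part of the original file): how a debug stub
   might read guest virtual memory.  cpu_memory_rw_debug() walks the guest
   address space page by page via cpu_get_phys_page_debug() and returns -1
   as soon as a page is unmapped.  The helper name is hypothetical. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, out, len, 0) < 0)
        return -1;  /* some page in the range was not mapped */
    return 0;
}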
2254
e3db7226
FB
2255void dump_exec_info(FILE *f,
2256 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2257{
2258 int i, target_code_size, max_target_code_size;
2259 int direct_jmp_count, direct_jmp2_count, cross_page;
2260 TranslationBlock *tb;
2261
2262 target_code_size = 0;
2263 max_target_code_size = 0;
2264 cross_page = 0;
2265 direct_jmp_count = 0;
2266 direct_jmp2_count = 0;
2267 for(i = 0; i < nb_tbs; i++) {
2268 tb = &tbs[i];
2269 target_code_size += tb->size;
2270 if (tb->size > max_target_code_size)
2271 max_target_code_size = tb->size;
2272 if (tb->page_addr[1] != -1)
2273 cross_page++;
2274 if (tb->tb_next_offset[0] != 0xffff) {
2275 direct_jmp_count++;
2276 if (tb->tb_next_offset[1] != 0xffff) {
2277 direct_jmp2_count++;
2278 }
2279 }
2280 }
2281 /* XXX: avoid using doubles ? */
2282 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2283 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2284 nb_tbs ? target_code_size / nb_tbs : 0,
2285 max_target_code_size);
2286 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2287 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2288 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2289 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2290 cross_page,
2291 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2292 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2293 direct_jmp_count,
2294 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2295 direct_jmp2_count,
2296 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2297 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2298 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2299 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2300}
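/* Illustrative sketch (not part of the original file): dump_exec_info()
   takes any fprintf-compatible callback, so the statistics can go straight
   to stderr or be routed elsewhere (e.g. to the monitor).  A minimal
   invocation: */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}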
2301
61382a50
FB
2302#if !defined(CONFIG_USER_ONLY)
2303
2304#define MMUSUFFIX _cmmu
2305#define GETPC() NULL
2306#define env cpu_single_env
b769d8fe 2307#define SOFTMMU_CODE_ACCESS
61382a50
FB
2308
2309#define SHIFT 0
2310#include "softmmu_template.h"
2311
2312#define SHIFT 1
2313#include "softmmu_template.h"
2314
2315#define SHIFT 2
2316#include "softmmu_template.h"
2317
2318#define SHIFT 3
2319#include "softmmu_template.h"
2320
2321#undef env
2322
2323#endif