/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

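/* Both l1_map and l1_phys_map are sparse two-level tables: the top
   L1_BITS of a page index select an L1 slot, which points to an array
   of L2_SIZE descriptors indexed by the low L2_BITS.  The L2 arrays
   are only allocated on first use (see page_find_alloc() below). */
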
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

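/* TB pointers stored in the per-page lists carry an index tag in their
   low two bits: the value 0 or 1 names which of the TB's (up to) two
   physical pages the link belongs to, and 2 marks a jump-list head
   (jmp_first).  Every traversal below therefore masks with ~3 before
   dereferencing. */
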
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

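/* code_bitmap holds one bit per byte of the page, set for bytes covered
   by a translated block; tb_invalidate_phys_page_fast() tests it to
   decide whether a given write actually hits translated code. */
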
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

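/* Regenerate and link a single TB for (pc, cs_base, flags); the
   self-modifying-code paths below use it to rebuild the faulting
   instruction as a CF_SINGLE_INSN block. */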
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

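/* tb_find_pc() maps a host code address back to the TB that contains
   it; combined with cpu_restore_state() this is what lets the SMC and
   fault paths above recover precise guest state from a host-side
   write PC. */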
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

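/* e.g. cpu_str_to_log_mask("in_asm,op") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP. */
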
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

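/* Code pages are "protected" through the dirty bitmap rather than host
   mprotect(): tlb_protect_code() clears CODE_DIRTY_FLAG for the page, so
   tlb_set_page_exec() installs an IO_MEM_NOTDIRTY write entry and every
   store goes through the notdirty_mem_write* handlers, which invalidate
   the overlapping TBs before performing the write. */
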
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

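/* Clear the given dirty flags for a physical address range and force the
   corresponding TLB write entries back to IO_MEM_NOTDIRTY, so the next
   store re-enters the slow path and can re-mark the pages dirty. */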
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
}

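/* Each TLB entry caches addend = host_address - guest_vaddr for its page,
   so the fast path turns a guest address into a host pointer with a single
   add; the two tlb_table[] banks are selected by privilege level through
   'is_user'. */
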
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                te->addr_write = vaddr | IO_MEM_ROM;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

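/* A board model typically calls this at init time; an illustrative use
   (values are examples only):
     cpu_register_physical_memory(0, ram_size, IO_MEM_RAM);
   maps RAM at physical address 0, while passing a phys_offset tagged with
   IO_MEM_ROM or an io_index from cpu_register_io_memory() maps ROM or
   device registers instead. */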
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

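/* The notdirty handlers implement the write-protection scheme described
   above tlb_protect_code(): they invalidate any TBs on the page, perform
   the store, update the dirty flags, and drop back to the fast path once
   no translated code remains on the page. */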
3a7d929e 1786static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1787{
3a7d929e
FB
1788 unsigned long ram_addr;
1789 int dirty_flags;
1790 ram_addr = addr - (unsigned long)phys_ram_base;
1791 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1792 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1793#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1794 tb_invalidate_phys_page_fast(ram_addr, 1);
1795 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1796#endif
3a7d929e 1797 }
c27004ec 1798 stb_p((uint8_t *)(long)addr, val);
f23db169
FB
1799 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1800 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1801 /* we remove the notdirty callback only if the code has been
1802 flushed */
1803 if (dirty_flags == 0xff)
6a00d601 1804 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1805}
1806
3a7d929e 1807static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1808{
1809 unsigned long ram_addr;
1810 int dirty_flags;
1811 ram_addr = addr - (unsigned long)phys_ram_base;
1812 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1813 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1814#if !defined(CONFIG_USER_ONLY)
1815 tb_invalidate_phys_page_fast(ram_addr, 2);
1816 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1817#endif
3a7d929e 1818 }
c27004ec 1819 stw_p((uint8_t *)(long)addr, val);
1820 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1821 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1822 /* we remove the notdirty callback only if the code has been
1823 flushed */
1824 if (dirty_flags == 0xff)
6a00d601 1825 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1826}
1827
3a7d929e 1828static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1829{
1830 unsigned long ram_addr;
1831 int dirty_flags;
1832 ram_addr = addr - (unsigned long)phys_ram_base;
1833 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1834 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1835#if !defined(CONFIG_USER_ONLY)
1836 tb_invalidate_phys_page_fast(ram_addr, 4);
1837 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1838#endif
3a7d929e 1839 }
c27004ec 1840 stl_p((uint8_t *)(long)addr, val);
1841 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1842 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1843 /* we remove the notdirty callback only if the code has been
1844 flushed */
1845 if (dirty_flags == 0xff)
6a00d601 1846 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1847}
1848
3a7d929e 1849static CPUReadMemoryFunc *error_mem_read[3] = {
1850 NULL, /* never used */
1851 NULL, /* never used */
1852 NULL, /* never used */
1853};
1854
1855static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1856 notdirty_mem_writeb,
1857 notdirty_mem_writew,
1858 notdirty_mem_writel,
1859};
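/* Illustrative note: phys_ram_dirty keeps one byte of flags per target
   page. CODE_DIRTY_FLAG, when set, records that any translated code
   derived from the page has already been invalidated, so writes need not
   be trapped for it; the remaining bits are per-client dirty bits (e.g.
   for the VGA framebuffer refresh). A device model polls and clears them
   roughly like this (sketch: VGA_DIRTY_FLAG and redraw_page() are
   assumptions, and the reset function's exact signature may differ in
   this tree): */
#if 0
if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
    redraw_page(page_addr);                 /* hypothetical helper */
    cpu_physical_memory_reset_dirty(page_addr,
                                    page_addr + TARGET_PAGE_SIZE,
                                    VGA_DIRTY_FLAG);
}
#endif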
1860
1861static void io_mem_init(void)
1862{
3a7d929e 1863 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 1864 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 1865 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1866 io_mem_nb = 5;
1867
1868 /* alloc dirty bits array */
0a962c02 1869 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 1870 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1871}
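/* For example (illustrative arithmetic): with 128 MB of guest RAM and
   4 KB target pages, phys_ram_size >> TARGET_PAGE_BITS is 32768, so the
   dirty map costs one byte per page (32 KB here) and starts out all-dirty
   (0xff) so that every page is scanned at least once. */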
1872
1873/* mem_read and mem_write are arrays of functions used to access a
1874 byte (index 0), word (index 1) and dword (index 2). All functions
1875 must be supplied. If io_index is non-zero, the corresponding I/O
1876 zone is modified. If it is zero, a new I/O zone is allocated. The
1877 return value can be used with cpu_register_physical_memory().
1878 (-1) is returned on error. */
1879int cpu_register_io_memory(int io_index,
1880 CPUReadMemoryFunc **mem_read,
1881 CPUWriteMemoryFunc **mem_write,
1882 void *opaque)
1883{
1884 int i;
1885
1886 if (io_index <= 0) {
b5ff1b31 1887 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1888 return -1;
1889 io_index = io_mem_nb++;
1890 } else {
1891 if (io_index >= IO_MEM_NB_ENTRIES)
1892 return -1;
1893 }
b5ff1b31 1894
1895 for(i = 0;i < 3; i++) {
1896 io_mem_read[io_index][i] = mem_read[i];
1897 io_mem_write[io_index][i] = mem_write[i];
1898 }
a4193c8a 1899 io_mem_opaque[io_index] = opaque;
1900 return io_index << IO_MEM_SHIFT;
1901}
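/* Usage sketch (illustrative; the mydev_* callbacks are hypothetical):
   a device supplies three read and three write callbacks, then maps the
   returned token at its physical address. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_init(target_phys_addr_t base, void *opaque)
{
    int io;
    io = cpu_register_io_memory(0, mydev_read, mydev_write, opaque);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif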
61382a50 1902
1903CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1904{
1905 return io_mem_write[io_index >> IO_MEM_SHIFT];
1906}
1907
1908CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1909{
1910 return io_mem_read[io_index >> IO_MEM_SHIFT];
1911}
1912
1913/* physical memory access (slow version, mainly for debug) */
1914#if defined(CONFIG_USER_ONLY)
2e12669a 1915void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1916 int len, int is_write)
1917{
1918 int l, flags;
1919 target_ulong page;
1920
1921 while (len > 0) {
1922 page = addr & TARGET_PAGE_MASK;
1923 l = (page + TARGET_PAGE_SIZE) - addr;
1924 if (l > len)
1925 l = len;
1926 flags = page_get_flags(page);
1927 if (!(flags & PAGE_VALID))
1928 return;
1929 if (is_write) {
1930 if (!(flags & PAGE_WRITE))
1931 return;
1932 memcpy((uint8_t *)addr, buf, l); /* copy only this page's chunk */
1933 } else {
1934 if (!(flags & PAGE_READ))
1935 return;
1936 memcpy(buf, (uint8_t *)addr, l); /* copy only this page's chunk */
1937 }
1938 len -= l;
1939 buf += l;
1940 addr += l;
1941 }
1942}
8df1cd07 1943
13eb76e0 1944#else
2e12669a 1945void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1946 int len, int is_write)
1947{
1948 int l, io_index;
1949 uint8_t *ptr;
1950 uint32_t val;
1951 target_phys_addr_t page;
1952 unsigned long pd;
92e873b9 1953 PhysPageDesc *p;
1954
1955 while (len > 0) {
1956 page = addr & TARGET_PAGE_MASK;
1957 l = (page + TARGET_PAGE_SIZE) - addr;
1958 if (l > len)
1959 l = len;
92e873b9 1960 p = phys_page_find(page >> TARGET_PAGE_BITS);
1961 if (!p) {
1962 pd = IO_MEM_UNASSIGNED;
1963 } else {
1964 pd = p->phys_offset;
1965 }
1966
1967 if (is_write) {
3a7d929e 1968 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0 1969 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1970 /* XXX: could force cpu_single_env to NULL to avoid
1971 potential bugs */
13eb76e0 1972 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 1973 /* 32 bit write access */
c27004ec 1974 val = ldl_p(buf);
a4193c8a 1975 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
1976 l = 4;
1977 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 1978 /* 16 bit write access */
c27004ec 1979 val = lduw_p(buf);
a4193c8a 1980 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
1981 l = 2;
1982 } else {
1c213d19 1983 /* 8 bit write access */
c27004ec 1984 val = ldub_p(buf);
a4193c8a 1985 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
1986 l = 1;
1987 }
1988 } else {
1989 unsigned long addr1;
1990 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 1991 /* RAM case */
b448f2f3 1992 ptr = phys_ram_base + addr1;
13eb76e0 1993 memcpy(ptr, buf, l);
1994 if (!cpu_physical_memory_is_dirty(addr1)) {
1995 /* invalidate code */
1996 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1997 /* set dirty bit */
1998 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
1999 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2000 }
2001 }
2002 } else {
3a7d929e 2003 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2004 /* I/O case */
2005 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2006 if (l >= 4 && ((addr & 3) == 0)) {
2007 /* 32 bit read access */
a4193c8a 2008 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 2009 stl_p(buf, val);
2010 l = 4;
2011 } else if (l >= 2 && ((addr & 1) == 0)) {
2012 /* 16 bit read access */
a4193c8a 2013 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 2014 stw_p(buf, val);
2015 l = 2;
2016 } else {
1c213d19 2017 /* 8 bit read access */
a4193c8a 2018 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2019 stb_p(buf, val);
2020 l = 1;
2021 }
2022 } else {
2023 /* RAM case */
2024 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2025 (addr & ~TARGET_PAGE_MASK);
2026 memcpy(buf, ptr, l);
2027 }
2028 }
2029 len -= l;
2030 buf += l;
2031 addr += l;
2032 }
2033}
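/* Usage sketch: DMA-style access from a device model. The
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers used
   elsewhere in this file simply call cpu_physical_memory_rw() with
   is_write set to 0 or 1. dma_addr is an illustrative variable. */
#if 0
uint8_t sector[512];
cpu_physical_memory_read(dma_addr, sector, sizeof(sector));
/* ... modify the buffer ... */
cpu_physical_memory_write(dma_addr, sector, sizeof(sector));
#endif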
2034
2035/* warning: addr must be aligned */
2036uint32_t ldl_phys(target_phys_addr_t addr)
2037{
2038 int io_index;
2039 uint8_t *ptr;
2040 uint32_t val;
2041 unsigned long pd;
2042 PhysPageDesc *p;
2043
2044 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2045 if (!p) {
2046 pd = IO_MEM_UNASSIGNED;
2047 } else {
2048 pd = p->phys_offset;
2049 }
2050
3a7d929e 2051 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2052 /* I/O case */
2053 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2054 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2055 } else {
2056 /* RAM case */
2057 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2058 (addr & ~TARGET_PAGE_MASK);
2059 val = ldl_p(ptr);
2060 }
2061 return val;
2062}
2063
2064/* warning: addr must be aligned */
2065uint64_t ldq_phys(target_phys_addr_t addr)
2066{
2067 int io_index;
2068 uint8_t *ptr;
2069 uint64_t val;
2070 unsigned long pd;
2071 PhysPageDesc *p;
2072
2073 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2074 if (!p) {
2075 pd = IO_MEM_UNASSIGNED;
2076 } else {
2077 pd = p->phys_offset;
2078 }
2079
2080 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2081 /* I/O case */
2082 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2083#ifdef TARGET_WORDS_BIGENDIAN
2084 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2085 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2086#else
2087 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2088 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2089#endif
2090 } else {
2091 /* RAM case */
2092 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2093 (addr & ~TARGET_PAGE_MASK);
2094 val = ldq_p(ptr);
2095 }
2096 return val;
2097}
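/* The I/O branch above assembles the 64-bit value from two 32-bit device
   reads; an equivalent formulation (sketch, valid for aligned addresses)
   makes the byte-order handling explicit: */
#if 0
uint64_t ldq_phys_alt(target_phys_addr_t addr)
{
#ifdef TARGET_WORDS_BIGENDIAN
    return ((uint64_t)ldl_phys(addr) << 32) | ldl_phys(addr + 4);
#else
    return ((uint64_t)ldl_phys(addr + 4) << 32) | ldl_phys(addr);
#endif
}
#endif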
2098
2099/* XXX: optimize */
2100uint32_t ldub_phys(target_phys_addr_t addr)
2101{
2102 uint8_t val;
2103 cpu_physical_memory_read(addr, &val, 1);
2104 return val;
2105}
2106
2107/* XXX: optimize */
2108uint32_t lduw_phys(target_phys_addr_t addr)
2109{
2110 uint16_t val;
2111 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2112 return tswap16(val);
2113}
2114
2115/* warning: addr must be aligned. The RAM page is not marked as dirty
2116 and the code inside it is not invalidated. This is useful when the dirty
2117 bits are used to track modified PTEs. */
2118void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2119{
2120 int io_index;
2121 uint8_t *ptr;
2122 unsigned long pd;
2123 PhysPageDesc *p;
2124
2125 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2126 if (!p) {
2127 pd = IO_MEM_UNASSIGNED;
2128 } else {
2129 pd = p->phys_offset;
2130 }
2131
3a7d929e 2132 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2133 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2134 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2135 } else {
2136 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2137 (addr & ~TARGET_PAGE_MASK);
2138 stl_p(ptr, val);
2139 }
2140}
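/* Illustrative use (sketch modeled on the target-i386 MMU helpers;
   PG_ACCESSED_MASK and pte_addr are assumptions here): when the dirty
   bitmap is being used to detect guest page-table writes, updating a
   PTE's accessed bit must not itself mark the page dirty: */
#if 0
pte |= PG_ACCESSED_MASK;
stl_phys_notdirty(pte_addr, pte);
#endif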
2141
2142/* warning: addr must be aligned */
2143void stl_phys(target_phys_addr_t addr, uint32_t val)
2144{
2145 int io_index;
2146 uint8_t *ptr;
2147 unsigned long pd;
2148 PhysPageDesc *p;
2149
2150 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2151 if (!p) {
2152 pd = IO_MEM_UNASSIGNED;
2153 } else {
2154 pd = p->phys_offset;
2155 }
2156
3a7d929e 2157 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2158 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2159 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2160 } else {
2161 unsigned long addr1;
2162 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2163 /* RAM case */
2164 ptr = phys_ram_base + addr1;
2165 stl_p(ptr, val);
2166 if (!cpu_physical_memory_is_dirty(addr1)) {
2167 /* invalidate code */
2168 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2169 /* set dirty bit */
2170 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2171 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2172 }
2173 }
2174}
2175
2176/* XXX: optimize */
2177void stb_phys(target_phys_addr_t addr, uint32_t val)
2178{
2179 uint8_t v = val;
2180 cpu_physical_memory_write(addr, &v, 1);
2181}
2182
2183/* XXX: optimize */
2184void stw_phys(target_phys_addr_t addr, uint32_t val)
2185{
2186 uint16_t v = tswap16(val);
2187 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2188}
2189
2190/* XXX: optimize */
2191void stq_phys(target_phys_addr_t addr, uint64_t val)
2192{
2193 val = tswap64(val);
2194 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2195}
2196
2197#endif
2198
2199/* virtual memory access for debug */
2200int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2201 uint8_t *buf, int len, int is_write)
2202{
2203 int l;
2204 target_ulong page, phys_addr;
2205
2206 while (len > 0) {
2207 page = addr & TARGET_PAGE_MASK;
2208 phys_addr = cpu_get_phys_page_debug(env, page);
2209 /* if no physical page mapped, return an error */
2210 if (phys_addr == -1)
2211 return -1;
2212 l = (page + TARGET_PAGE_SIZE) - addr;
2213 if (l > len)
2214 l = len;
2215 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2216 buf, l, is_write);
2217 len -= l;
2218 buf += l;
2219 addr += l;
2220 }
2221 return 0;
2222}
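/* Usage sketch: this is what the gdb stub and the monitor go through to
   read guest memory by virtual address. For instance (illustrative,
   x86-flavoured): */
#if 0
uint32_t insn;
if (cpu_memory_rw_debug(env, env->eip, (uint8_t *)&insn,
                        sizeof(insn), 0) == 0) {
    /* insn now holds the raw bytes at the guest program counter */
}
#endif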
2223
2224void dump_exec_info(FILE *f,
2225 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2226{
2227 int i, target_code_size, max_target_code_size;
2228 int direct_jmp_count, direct_jmp2_count, cross_page;
2229 TranslationBlock *tb;
2230
2231 target_code_size = 0;
2232 max_target_code_size = 0;
2233 cross_page = 0;
2234 direct_jmp_count = 0;
2235 direct_jmp2_count = 0;
2236 for(i = 0; i < nb_tbs; i++) {
2237 tb = &tbs[i];
2238 target_code_size += tb->size;
2239 if (tb->size > max_target_code_size)
2240 max_target_code_size = tb->size;
2241 if (tb->page_addr[1] != -1)
2242 cross_page++;
2243 if (tb->tb_next_offset[0] != 0xffff) {
2244 direct_jmp_count++;
2245 if (tb->tb_next_offset[1] != 0xffff) {
2246 direct_jmp2_count++;
2247 }
2248 }
2249 }
2250 /* XXX: avoid using doubles? */
2251 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2252 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2253 nb_tbs ? target_code_size / nb_tbs : 0,
2254 max_target_code_size);
2255 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2256 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2257 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2258 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2259 cross_page,
2260 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2261 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2262 direct_jmp_count,
2263 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2264 direct_jmp2_count,
2265 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2266 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2267 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2268 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2269}
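/* Usage sketch: the callback signature is fprintf()-compatible, so the
   statistics can be dumped to stderr directly (illustrative call): */
#if 0
dump_exec_info(stderr, fprintf);
#endif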
2270
2271#if !defined(CONFIG_USER_ONLY)
2272
2273#define MMUSUFFIX _cmmu
2274#define GETPC() NULL
2275#define env cpu_single_env
b769d8fe 2276#define SOFTMMU_CODE_ACCESS
2277
2278#define SHIFT 0
2279#include "softmmu_template.h"
2280
2281#define SHIFT 1
2282#include "softmmu_template.h"
2283
2284#define SHIFT 2
2285#include "softmmu_template.h"
2286
2287#define SHIFT 3
2288#include "softmmu_template.h"
2289
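/* Each inclusion above instantiates the memory access helpers for one
   access size: SHIFT n yields a (1 << n)-byte access, so SHIFT 0/1/2/3
   produce the b/w/l/q variants. With MMUSUFFIX _cmmu and
   SOFTMMU_CODE_ACCESS defined, these are the code-fetch helpers used
   when translating, as opposed to the data-access versions. */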
2290#undef env
2291
2292#endif