/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

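/* The page maps below are two-level tables: the upper L1_BITS of a
   page index select an L1 slot, the lower L2_BITS select an entry in
   the L2 array that slot points to. As an illustration (assuming a
   4 KB target page, i.e. TARGET_PAGE_BITS = 12): L1_BITS = 32 - 10 -
   12 = 10, so a 32 bit address yields a 20 bit page index whose top
   10 bits index the L1 array and whose bottom 10 bits index the L2
   array. L2 arrays are only allocated when first touched, which keeps
   sparse address spaces cheap. */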
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif
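    /* The rounding above is plain align-down/align-up arithmetic. As a
       worked example (with made-up numbers): for a 4096 byte host page
       and a buffer at 0x804a100 of size 0x2000, 'start' rounds down to
       0x804a000 and 'end' rounds up to 0x804d000, so the mprotect()
       covers the whole buffer and the generated code can be executed. */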

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(PhysPageDesc *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(PhysPageDesc *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    /* XXX: should not truncate for 64 bit addresses */
#if TARGET_LONG_BITS > 32
    index &= (L1_SIZE - 1);
#endif
    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
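/* NOTE: bumping virt_valid_tag invalidates every VirtPageDesc at once
   in O(1), since entries only count as live when their 'valid_tag'
   matches the current generation. Only when the counter wraps to 0 do
   we pay for a full sweep clearing the stale tags. */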
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
    virt_page_flush();

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
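/* tb_remove() is a generic singly linked list unlink: 'next_offset'
   is the byte offset of the relevant 'next' field inside
   TranslationBlock, so the same helper serves any of the TB lists,
   e.g.
       tb_remove(&tb_phys_hash[h], tb,
                 offsetof(TranslationBlock, phys_hash_next));
   as done in tb_phys_invalidate() below. */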

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
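/* The TB lists here use tagged pointers: since TranslationBlock
   structures are at least 4 byte aligned, the low 2 bits of each link
   are free to carry data. In the per-page lists they hold 'n', the
   index (0 or 1) of the physical page the link belongs to; in the
   jump lists the value 2 marks the list head stored in 'jmp_first'.
   Masking with ~3 recovers the real pointer, as done above. */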

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
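/* Resetting a jump retargets the patched branch at
   tb->tc_ptr + tb->tb_next_offset[n], i.e. back into the TB's own
   generated code, so that instead of chaining directly into another
   TB, execution continues with the TB's exit sequence and returns to
   the main execution loop, which will look the next TB up again. */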

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash; an
           unlinked TB is simply not currently in use */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
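/* Worked example: set_bits(tab, 5, 10) must set bits 5..14. The start
   and end bytes differ, so the first byte gets mask 0xff << 5 = 0xe0
   (bits 5-7), no full 0xff bytes follow, and the tail byte gets
   ~(0xff << 7) = 0x7f (bits 8-14). */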

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
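/* The resulting bitmap has one bit per byte of the page, set wherever
   translated code lives. tb_invalidate_phys_page_fast() consults it
   so that a guest write to a "code" page only takes the expensive
   invalidation path when it actually overlaps a TB. */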

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
938/* link the tb with the other TBs */
939void tb_link(TranslationBlock *tb)
940{
941#if !defined(CONFIG_USER_ONLY)
942 {
943 VirtPageDesc *vp;
944 target_ulong addr;
945
946 /* save the code memory mappings (needed to invalidate the code) */
947 addr = tb->pc & TARGET_PAGE_MASK;
948 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
949#ifdef DEBUG_TLB_CHECK
950 if (vp->valid_tag == virt_valid_tag &&
951 vp->phys_addr != tb->page_addr[0]) {
952 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
953 addr, tb->page_addr[0], vp->phys_addr);
954 }
955#endif
9fa3e853 956 vp->phys_addr = tb->page_addr[0];
59817ccb
FB
957 if (vp->valid_tag != virt_valid_tag) {
958 vp->valid_tag = virt_valid_tag;
959#if !defined(CONFIG_SOFTMMU)
960 vp->prot = 0;
961#endif
962 }
9fa3e853
FB
963
964 if (tb->page_addr[1] != -1) {
965 addr += TARGET_PAGE_SIZE;
966 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
98857888
FB
967#ifdef DEBUG_TLB_CHECK
968 if (vp->valid_tag == virt_valid_tag &&
969 vp->phys_addr != tb->page_addr[1]) {
970 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
971 addr, tb->page_addr[1], vp->phys_addr);
972 }
973#endif
9fa3e853 974 vp->phys_addr = tb->page_addr[1];
59817ccb
FB
975 if (vp->valid_tag != virt_valid_tag) {
976 vp->valid_tag = virt_valid_tag;
977#if !defined(CONFIG_SOFTMMU)
978 vp->prot = 0;
979#endif
980 }
9fa3e853
FB
981 }
982 }
983#endif
984
d4e8164f
FB
985 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
986 tb->jmp_next[0] = NULL;
987 tb->jmp_next[1] = NULL;
b448f2f3
FB
988#ifdef USE_CODE_COPY
989 tb->cflags &= ~CF_FP_USED;
990 if (tb->cflags & CF_TB_FP_USED)
991 tb->cflags |= CF_FP_USED;
992#endif
d4e8164f
FB
993
994 /* init original jump addresses */
995 if (tb->tb_next_offset[0] != 0xffff)
996 tb_reset_jump(tb, 0);
997 if (tb->tb_next_offset[1] != 0xffff)
998 tb_reset_jump(tb, 1);
fd6ce8f6
FB
999}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
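/* The binary search is valid because TBs are carved sequentially out
   of code_gen_buffer, so tbs[] is sorted by tc_ptr. When the loop
   falls through without an exact hit, tbs[m_max] is the last TB whose
   tc_ptr lies below the probe address, i.e. the block containing it. */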

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
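/* Usage example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every entry of
   cpu_log_items[], and an unknown name makes the whole call return 0
   (this is how the '-d' command line option string is parsed). */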

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}
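/* Retagging the write TLB entries with IO_MEM_CODE forces every store
   to this page through the code_mem_write* handlers below instead of
   the fast RAM path, which is how self modifying code is caught in
   softmmu mode; tlb_unprotect_code_phys() undoes this once the page
   no longer holds any translated block. */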

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    len = length >> TARGET_PAGE_BITS;
    for(i = 0; i < len; i++)
        p[i] &= mask;

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, 'valid_tag' is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
1835
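/* Illustrative sketch (hypothetical host SIGSEGV handler, not from
   this file): the handler passes the faulting address and pc so the
   page can be unprotected and its translated code invalidated; on
   success the faulting instruction is simply restarted:

       if (page_unprotect(fault_addr, host_pc, puc))
           return;
*/
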
/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' is rounded up to a multiple of the
   target page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then
   it is an io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

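/* Illustrative sketch (hypothetical board init, not from this file;
   ram_size, rom_size and rom_offset are assumed values): map RAM at
   physical address 0 backed by offset 0 of phys_ram_base, and a ROM
   region whose offset carries the IO_MEM_ROM tag:

       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0xfffe0000, rom_size,
                                    rom_offset | IO_MEM_ROM);
*/
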
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_p((uint8_t *)(long)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stb_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stw_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    stl_p((uint8_t *)(long)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone; check the next free slot, not the
           (always negative or zero) requested index */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

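/* Illustrative sketch (hypothetical device model 'mydev', not from
   this file): supply one callback per access size, let io_index 0
   allocate a fresh io zone, then map it into the physical address
   space:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, io);
*/
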
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most up to the end of the page just checked */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* never used */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return 0;
}

void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

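/* Illustrative sketch (hypothetical device DMA, not from this file;
   desc_paddr and status_paddr are assumed guest physical addresses):
   a device model reads a descriptor from guest memory and writes a
   completion status back; the last argument selects the direction
   (0 = read, 1 = write):

       uint8_t desc[16], status = 0;
       cpu_physical_memory_rw(desc_paddr, desc, sizeof(desc), 0);
       cpu_physical_memory_rw(status_paddr, &status, 1, 1);
*/
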
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

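/* Illustrative sketch (assumed target MMU helper; PG_ACCESSED_MASK is
   a hypothetical PTE flag): updating a guest PTE with the notdirty
   variant keeps the dirty bitmap tracking genuine guest stores only:

       pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/
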
/* warning: addr must be aligned */
/* XXX: optimize code invalidation test */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != 0) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        /* invalidate code */
        tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
        /* set dirty bit */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
    }
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

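/* Illustrative sketch (hypothetical debugger stub; report_unmapped()
   is an assumed helper, not from this file): guest virtual memory is
   read through the CPU page tables, and a negative return means the
   address is currently unmapped:

       if (cpu_memory_rw_debug(env, vaddr, buf, len, 0) < 0)
           report_unmapped(vaddr);
*/
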
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}

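/* Illustrative sketch: dump_exec_info() accepts any
   fprintf-compatible callback, so the statistics can be sent
   straight to a stdio stream:

       dump_exec_info(stderr, fprintf);
*/
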
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif