/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
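/* one dirty byte per target page of RAM (0xff = fully dirty); the
   CODE_DIRTY_FLAG bit, cleared by tlb_protect_code and set back by
   tlb_unprotect_code_phys, tracks pages whose translated code is
   still valid */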
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

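/* e.g. with 4 KB target pages (TARGET_PAGE_BITS = 12) this gives
   L1_BITS = 32 - 10 - 12 = 10, i.e. a two-level table of
   1024 x 1024 entries covering the 32 bit address space */
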
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

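/* TB pointers stored in a page's TB list and in the jump lists carry
   a tag in their low two bits: 0 or 1 selects which of the (up to
   two) pages of the TB the link belongs to, and the value 2 in
   jmp_first marks the end of the circular jump list; masking with ~3
   recovers the real pointer */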
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    cpu_single_env->tb_jmp_cache[h] = NULL;

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

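/* set bits [start, start + len) in the byte oriented bit array 'tab';
   e.g. set_bits(tab, 3, 7) sets bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03 */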
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
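/* (in practice the callers are the notdirty write handlers, so len is
   1, 2 or 4 and the tested bits always fall within one bitmap byte) */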
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, page_addr, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
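/* (tbs[] entries are created with monotonically increasing tc_ptr
   values until the next tb_flush, so a binary search applies) */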
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

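/* usage sketch: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU; the special name "all" selects
   every entry of cpu_log_items[] */
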
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], vaddr);
    tlb_protect_code1(&env->tlb_write[1][i], vaddr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_set_notdirty(env, ram_addr);
    }
#endif
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;

#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (vaddr < MMAP_AREA_END)
        mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

1304
3a7d929e 1305void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1306 int dirty_flags)
1ccde1cb
FB
1307{
1308 CPUState *env;
4f2ac237 1309 unsigned long length, start1;
0a962c02
FB
1310 int i, mask, len;
1311 uint8_t *p;
1ccde1cb
FB
1312
1313 start &= TARGET_PAGE_MASK;
1314 end = TARGET_PAGE_ALIGN(end);
1315
1316 length = end - start;
1317 if (length == 0)
1318 return;
0a962c02 1319 len = length >> TARGET_PAGE_BITS;
1ccde1cb 1320 env = cpu_single_env;
3a7d929e
FB
1321#ifdef USE_KQEMU
1322 if (env->kqemu_enabled) {
f23db169
FB
1323 ram_addr_t addr;
1324 addr = start;
1325 for(i = 0; i < len; i++) {
1326 kqemu_set_notdirty(env, addr);
1327 addr += TARGET_PAGE_SIZE;
1328 }
3a7d929e
FB
1329 }
1330#endif
f23db169
FB
1331 mask = ~dirty_flags;
1332 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1333 for(i = 0; i < len; i++)
1334 p[i] &= mask;
1335
1ccde1cb
FB
1336 /* we modify the TLB cache so that the dirty bit will be set again
1337 when accessing the range */
59817ccb 1338 start1 = start + (unsigned long)phys_ram_base;
9fa3e853 1339 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb 1340 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
9fa3e853 1341 for(i = 0; i < CPU_TLB_SIZE; i++)
59817ccb
FB
1342 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1343
1344#if !defined(CONFIG_SOFTMMU)
1345 /* XXX: this is expensive */
1346 {
1347 VirtPageDesc *p;
1348 int j;
1349 target_ulong addr;
1350
1351 for(i = 0; i < L1_SIZE; i++) {
1352 p = l1_virt_map[i];
1353 if (p) {
1354 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1355 for(j = 0; j < L2_SIZE; j++) {
1356 if (p->valid_tag == virt_valid_tag &&
1357 p->phys_addr >= start && p->phys_addr < end &&
1358 (p->prot & PROT_WRITE)) {
1359 if (addr < MMAP_AREA_END) {
1360 mprotect((void *)addr, TARGET_PAGE_SIZE,
1361 p->prot & ~PROT_WRITE);
1362 }
1363 }
1364 addr += TARGET_PAGE_SIZE;
1365 p++;
1366 }
1367 }
1368 }
1369 }
1370#endif
1ccde1cb
FB
1371}
1372
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
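            /* the io_index is kept in the low bits of 'address' (cf. the
               phys_offset comment in PhysPageDesc) so that the softmmu
               slow path can dispatch to the registered handlers */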
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

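/* RAM pages that still hold valid translated code are entered in the
   TLB with the IO_MEM_NOTDIRTY write handlers below: each write first
   invalidates the TBs on the page, then performs the store and sets
   the dirty bits; the slow path is removed once the page is fully
   dirty again */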
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

3a7d929e 1835static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
1836 NULL, /* never used */
1837 NULL, /* never used */
1838 NULL, /* never used */
1839};
1840
1ccde1cb
FB
1841static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1842 notdirty_mem_writeb,
1843 notdirty_mem_writew,
1844 notdirty_mem_writel,
1845};
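/* The three handlers above keep phys_ram_dirty in sync: a write that
   goes through IO_MEM_NOTDIRTY invalidates any TBs on the page if
   needed, performs the store, then sets all dirty bits except
   CODE_DIRTY_FLAG. A sketch of a consumer polling the bitmap through
   cpu_physical_memory_is_dirty(), e.g. a display refresh loop;
   fb_offset, fb_size and redraw_page() are hypothetical. */
#if 0
static void refresh_framebuffer(unsigned long fb_offset,
                                unsigned long fb_size)
{
    unsigned long addr;
    for (addr = fb_offset; addr < fb_offset + fb_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_is_dirty(addr))
            redraw_page(addr); /* a real consumer would also clear
                                  the dirty bits afterwards */
    }
}
#endif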
1846
33417e70
FB
1847static void io_mem_init(void)
1848{
3a7d929e 1849 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 1850 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 1851 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1ccde1cb
FB
1852 io_mem_nb = 5;
1853
1854 /* alloc dirty bits array */
0a962c02 1855 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 1856 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
1857}
1858
1859/* mem_read and mem_write are arrays of three functions which handle
1860 byte (index 0), word (index 1) and dword (index 2) accesses. All
1861 functions must be supplied. If io_index is non-zero, the
1862 corresponding I/O zone is modified. If it is zero, a new I/O zone
1863 is allocated. The return value can be used with
1864 cpu_register_physical_memory(); -1 is returned on error. */
1865int cpu_register_io_memory(int io_index,
1866 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
1867 CPUWriteMemoryFunc **mem_write,
1868 void *opaque)
33417e70
FB
1869{
1870 int i;
1871
1872 if (io_index <= 0) {
1873 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1874 return -1;
1875 io_index = io_mem_nb++;
1876 } else {
1877 if (io_index >= IO_MEM_NB_ENTRIES)
1878 return -1;
1879 }
1880
1881 for(i = 0;i < 3; i++) {
1882 io_mem_read[io_index][i] = mem_read[i];
1883 io_mem_write[io_index][i] = mem_write[i];
1884 }
a4193c8a 1885 io_mem_opaque[io_index] = opaque;
33417e70
FB
1886 return io_index << IO_MEM_SHIFT;
1887}
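/* A minimal registration sketch (editor's illustration): a device
   model supplies one callback per access size and passes 0 as
   io_index to get a fresh slot; the my_dev_* names are hypothetical. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void my_dev_map(target_phys_addr_t base, void *dev_state)
{
    int io;
    io = cpu_register_io_memory(0, my_dev_read, my_dev_write, dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif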
61382a50 1888
8926b517
FB
1889CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1890{
1891 return io_mem_write[io_index >> IO_MEM_SHIFT];
1892}
1893
1894CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1895{
1896 return io_mem_read[io_index >> IO_MEM_SHIFT];
1897}
1898
13eb76e0
FB
1899/* physical memory access (slow version, mainly for debug) */
1900#if defined(CONFIG_USER_ONLY)
2e12669a 1901void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1902 int len, int is_write)
1903{
1904 int l, flags;
1905 target_ulong page;
1906
1907 while (len > 0) {
1908 page = addr & TARGET_PAGE_MASK;
1909 l = (page + TARGET_PAGE_SIZE) - addr;
1910 if (l > len)
1911 l = len;
1912 flags = page_get_flags(page);
1913 if (!(flags & PAGE_VALID))
1914 return;
1915 if (is_write) {
1916 if (!(flags & PAGE_WRITE))
1917 return;
1918 memcpy((uint8_t *)addr, buf, l);
1919 } else {
1920 if (!(flags & PAGE_READ))
1921 return;
1922 memcpy(buf, (uint8_t *)addr, l);
1923 }
1924 len -= l;
1925 buf += l;
1926 addr += l;
1927 }
1928}
8df1cd07 1929
13eb76e0 1930#else
2e12669a 1931void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
1932 int len, int is_write)
1933{
1934 int l, io_index;
1935 uint8_t *ptr;
1936 uint32_t val;
2e12669a
FB
1937 target_phys_addr_t page;
1938 unsigned long pd;
92e873b9 1939 PhysPageDesc *p;
13eb76e0
FB
1940
1941 while (len > 0) {
1942 page = addr & TARGET_PAGE_MASK;
1943 l = (page + TARGET_PAGE_SIZE) - addr;
1944 if (l > len)
1945 l = len;
92e873b9 1946 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
1947 if (!p) {
1948 pd = IO_MEM_UNASSIGNED;
1949 } else {
1950 pd = p->phys_offset;
1951 }
1952
1953 if (is_write) {
3a7d929e 1954 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
13eb76e0
FB
1955 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1956 if (l >= 4 && ((addr & 3) == 0)) {
1c213d19 1957 /* 32 bit write access */
c27004ec 1958 val = ldl_p(buf);
a4193c8a 1959 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
1960 l = 4;
1961 } else if (l >= 2 && ((addr & 1) == 0)) {
1c213d19 1962 /* 16 bit write access */
c27004ec 1963 val = lduw_p(buf);
a4193c8a 1964 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
1965 l = 2;
1966 } else {
1c213d19 1967 /* 8 bit write access */
c27004ec 1968 val = ldub_p(buf);
a4193c8a 1969 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
13eb76e0
FB
1970 l = 1;
1971 }
1972 } else {
b448f2f3
FB
1973 unsigned long addr1;
1974 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 1975 /* RAM case */
b448f2f3 1976 ptr = phys_ram_base + addr1;
13eb76e0 1977 memcpy(ptr, buf, l);
3a7d929e
FB
1978 if (!cpu_physical_memory_is_dirty(addr1)) {
1979 /* invalidate code */
1980 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1981 /* set dirty bit */
f23db169
FB
1982 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
1983 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 1984 }
13eb76e0
FB
1985 }
1986 } else {
3a7d929e 1987 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
13eb76e0
FB
1988 /* I/O case */
1989 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1990 if (l >= 4 && ((addr & 3) == 0)) {
1991 /* 32 bit read access */
a4193c8a 1992 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
c27004ec 1993 stl_p(buf, val);
13eb76e0
FB
1994 l = 4;
1995 } else if (l >= 2 && ((addr & 1) == 0)) {
1996 /* 16 bit read access */
a4193c8a 1997 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
c27004ec 1998 stw_p(buf, val);
13eb76e0
FB
1999 l = 2;
2000 } else {
1c213d19 2001 /* 8 bit read access */
a4193c8a 2002 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
c27004ec 2003 stb_p(buf, val);
13eb76e0
FB
2004 l = 1;
2005 }
2006 } else {
2007 /* RAM case */
2008 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2009 (addr & ~TARGET_PAGE_MASK);
2010 memcpy(buf, ptr, l);
2011 }
2012 }
2013 len -= l;
2014 buf += l;
2015 addr += l;
2016 }
2017}
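/* A usage sketch (editor's illustration): device emulation can route
   DMA-style transfers through cpu_physical_memory_rw() without caring
   whether the destination is RAM or MMIO; the
   cpu_physical_memory_read()/write() helpers used elsewhere in this
   file are the is_write = 0/1 shorthands. dma_to_guest is a
   hypothetical name. */
#if 0
static void dma_to_guest(target_phys_addr_t dst, uint8_t *dma_buf,
                         int len)
{
    cpu_physical_memory_rw(dst, dma_buf, len, 1 /* is_write */);
}
#endif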
8df1cd07
FB
2018
2019/* warning: addr must be aligned */
2020uint32_t ldl_phys(target_phys_addr_t addr)
2021{
2022 int io_index;
2023 uint8_t *ptr;
2024 uint32_t val;
2025 unsigned long pd;
2026 PhysPageDesc *p;
2027
2028 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2029 if (!p) {
2030 pd = IO_MEM_UNASSIGNED;
2031 } else {
2032 pd = p->phys_offset;
2033 }
2034
3a7d929e 2035 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
8df1cd07
FB
2036 /* I/O case */
2037 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2038 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2039 } else {
2040 /* RAM case */
2041 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2042 (addr & ~TARGET_PAGE_MASK);
2043 val = ldl_p(ptr);
2044 }
2045 return val;
2046}
2047
aab33094
FB
2048/* XXX: optimize */
2049uint32_t ldub_phys(target_phys_addr_t addr)
2050{
2051 uint8_t val;
2052 cpu_physical_memory_read(addr, &val, 1);
2053 return val;
2054}
2055
2056/* XXX: optimize */
2057uint32_t lduw_phys(target_phys_addr_t addr)
2058{
2059 uint16_t val;
2060 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2061 return tswap16(val);
2062}
2063
2064/* XXX: optimize */
2065uint64_t ldq_phys(target_phys_addr_t addr)
2066{
2067 uint64_t val;
2068 cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
2069 return tswap64(val);
2070}
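/* The ld*_phys() helpers above all return values in host byte order:
   ldl_phys() goes through the I/O callbacks or reads RAM with ldl_p(),
   while the unoptimized variants fetch target-order bytes with
   cpu_physical_memory_read() and convert with tswap16()/tswap64().
   A sketch reading a two-word descriptor from guest RAM; desc_paddr
   and its layout are hypothetical. */
#if 0
static void read_descriptor(target_phys_addr_t desc_paddr,
                            uint32_t *flags, uint32_t *buf_addr)
{
    *flags    = ldl_phys(desc_paddr);
    *buf_addr = ldl_phys(desc_paddr + 4);
}
#endif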
2071
8df1cd07
FB
2072/* warning: addr must be aligned. The ram page is not marked as dirty
2073 and the code inside is not invalidated. It is useful if the dirty
2074 bits are used to track modified PTEs */
2075void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2076{
2077 int io_index;
2078 uint8_t *ptr;
2079 unsigned long pd;
2080 PhysPageDesc *p;
2081
2082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2083 if (!p) {
2084 pd = IO_MEM_UNASSIGNED;
2085 } else {
2086 pd = p->phys_offset;
2087 }
2088
3a7d929e 2089 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2091 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2092 } else {
2093 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2094 (addr & ~TARGET_PAGE_MASK);
2095 stl_p(ptr, val);
2096 }
2097}
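/* A usage sketch (editor's illustration) of the PTE case mentioned in
   the comment above: a page-table walker sets accessed/dirty bits in a
   guest PTE with stl_phys_notdirty() so that the update itself does
   not mark the page dirty, leaving the dirty bitmap free to flag
   guest-modified page tables. PTE_DIRTY_BIT is a hypothetical name. */
#if 0
static void set_pte_dirty(target_phys_addr_t pte_paddr)
{
    uint32_t pte = ldl_phys(pte_paddr);
    stl_phys_notdirty(pte_paddr, pte | PTE_DIRTY_BIT);
}
#endif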
2098
2099/* warning: addr must be aligned */
8df1cd07
FB
2100void stl_phys(target_phys_addr_t addr, uint32_t val)
2101{
2102 int io_index;
2103 uint8_t *ptr;
2104 unsigned long pd;
2105 PhysPageDesc *p;
2106
2107 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2108 if (!p) {
2109 pd = IO_MEM_UNASSIGNED;
2110 } else {
2111 pd = p->phys_offset;
2112 }
2113
3a7d929e 2114 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07
FB
2115 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2116 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2117 } else {
2118 unsigned long addr1;
2119 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2120 /* RAM case */
2121 ptr = phys_ram_base + addr1;
2122 stl_p(ptr, val);
3a7d929e
FB
2123 if (!cpu_physical_memory_is_dirty(addr1)) {
2124 /* invalidate code */
2125 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2126 /* set dirty bit */
f23db169
FB
2127 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2128 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2129 }
8df1cd07
FB
2130 }
2131}
2132
aab33094
FB
2133/* XXX: optimize */
2134void stb_phys(target_phys_addr_t addr, uint32_t val)
2135{
2136 uint8_t v = val;
2137 cpu_physical_memory_write(addr, &v, 1);
2138}
2139
2140/* XXX: optimize */
2141void stw_phys(target_phys_addr_t addr, uint32_t val)
2142{
2143 uint16_t v = tswap16(val);
2144 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2145}
2146
2147/* XXX: optimize */
2148void stq_phys(target_phys_addr_t addr, uint64_t val)
2149{
2150 val = tswap64(val);
2151 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2152}
2153
13eb76e0
FB
2154#endif
2155
2156/* virtual memory access for debug */
b448f2f3
FB
2157int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2158 uint8_t *buf, int len, int is_write)
13eb76e0
FB
2159{
2160 int l;
2161 target_ulong page, phys_addr;
2162
2163 while (len > 0) {
2164 page = addr & TARGET_PAGE_MASK;
2165 phys_addr = cpu_get_phys_page_debug(env, page);
2166 /* if no physical page mapped, return an error */
2167 if (phys_addr == -1)
2168 return -1;
2169 l = (page + TARGET_PAGE_SIZE) - addr;
2170 if (l > len)
2171 l = len;
b448f2f3
FB
2172 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2173 buf, l, is_write);
13eb76e0
FB
2174 len -= l;
2175 buf += l;
2176 addr += l;
2177 }
2178 return 0;
2179}
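/* A usage sketch (editor's illustration): a debugger stub reads guest
   virtual memory through cpu_memory_rw_debug(), which translates each
   page with cpu_get_phys_page_debug() before falling through to
   cpu_physical_memory_rw(). gdb_read_guest is a hypothetical name. */
#if 0
static int gdb_read_guest(CPUState *env, target_ulong vaddr,
                          uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}
#endif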
2180
e3db7226
FB
2181void dump_exec_info(FILE *f,
2182 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2183{
2184 int i, target_code_size, max_target_code_size;
2185 int direct_jmp_count, direct_jmp2_count, cross_page;
2186 TranslationBlock *tb;
2187
2188 target_code_size = 0;
2189 max_target_code_size = 0;
2190 cross_page = 0;
2191 direct_jmp_count = 0;
2192 direct_jmp2_count = 0;
2193 for(i = 0; i < nb_tbs; i++) {
2194 tb = &tbs[i];
2195 target_code_size += tb->size;
2196 if (tb->size > max_target_code_size)
2197 max_target_code_size = tb->size;
2198 if (tb->page_addr[1] != -1)
2199 cross_page++;
2200 if (tb->tb_next_offset[0] != 0xffff) {
2201 direct_jmp_count++;
2202 if (tb->tb_next_offset[1] != 0xffff) {
2203 direct_jmp2_count++;
2204 }
2205 }
2206 }
2207 /* XXX: avoid using doubles? */
2208 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2209 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2210 nb_tbs ? target_code_size / nb_tbs : 0,
2211 max_target_code_size);
2212 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2213 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2214 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2215 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2216 cross_page,
2217 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2218 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2219 direct_jmp_count,
2220 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2221 direct_jmp2_count,
2222 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2223 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2224 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2225 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2226}
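/* dump_exec_info() accepts any fprintf-compatible callback, so it can
   be pointed directly at a stdio stream; a minimal call: */
#if 0
    dump_exec_info(stderr, fprintf);
#endif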
2227
61382a50
FB
2228#if !defined(CONFIG_USER_ONLY)
2229
2230#define MMUSUFFIX _cmmu
2231#define GETPC() NULL
2232#define env cpu_single_env
b769d8fe 2233#define SOFTMMU_CODE_ACCESS
61382a50
FB
2234
2235#define SHIFT 0
2236#include "softmmu_template.h"
2237
2238#define SHIFT 1
2239#include "softmmu_template.h"
2240
2241#define SHIFT 2
2242#include "softmmu_template.h"
2243
2244#define SHIFT 3
2245#include "softmmu_template.h"
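/* The four inclusions above expand softmmu_template.h once per access
   size: SHIFT 0, 1, 2 and 3 generate the byte, word, dword and qword
   helpers. With MMUSUFFIX set to _cmmu and SOFTMMU_CODE_ACCESS
   defined, these are the code-access variants the translator uses to
   fetch target instructions through the software MMU. */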
2246
2247#undef env
2248
2249#endif