/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

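/* Illustrative note (not in the original source): a page index is split
   into an L1 slot and an L2 slot for the two-level tables below.  Assuming
   TARGET_PAGE_BITS = 12, L2_BITS = 10 gives L1_BITS = 10, so for a 32-bit
   address:

       index   = addr >> TARGET_PAGE_BITS;      // 20-bit page index
       l1_slot = index >> L2_BITS;              // top 10 bits
       l2_slot = index & (L2_SIZE - 1);         // bottom 10 bits

   e.g. addr 0x08049123 -> index 0x08049, l1_slot 0x20, l2_slot 0x49. */
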
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

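/* Illustrative note (not in the original source): when
   TARGET_PHYS_ADDR_SPACE_BITS > 32 (e.g. 41 on sparc64), the walk above
   gains an extra top level, so a physical page index is consumed as

       top    = (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1);
       middle = (index >> L2_BITS) & (L1_SIZE - 1);
       leaf   = index & (L2_SIZE - 1);

   With alloc == 0 (the phys_page_find() wrapper) a missing table at any
   level makes the lookup return NULL instead of allocating it. */
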
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

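/* Illustrative note (not in the original source): list pointers such as
   page_next[] and jmp_first carry a tag in their two low bits, relying on
   TranslationBlock being at least 4-byte aligned.  Tag 0 or 1 records which
   of the TB's (up to two) physical pages the link belongs to; tag 2 marks
   the head of a circular list.  Decoding is always the same two steps:

       n  = (long)ptr & 3;                        // extract the tag
       tb = (TranslationBlock *)((long)ptr & ~3); // recover the pointer
*/
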
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

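/* Illustrative example (not in the original source): set_bits(tab, 5, 7)
   marks bits 5..11.  The first byte is OR-ed with (0xff << 5) & 0xff = 0xe0
   (bits 5-7); start then rounds up to 8 and the tail byte is OR-ed with
   ~(0xff << 4) & 0xff = 0x0f (bits 8-11): tab[0] |= 0xe0, tab[1] |= 0x0f. */
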
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in range [start, end[. NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called from a
   real cpu write access: the virtual CPU will exit the current TB if
   code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            saved_tb = env->current_tb;
            env->current_tb = NULL;
            tb_phys_invalidate(tb, -1);
            env->current_tb = saved_tb;
            if (env->interrupt_request && env->current_tb)
                cpu_interrupt(env, env->interrupt_request);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

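/* Illustrative sketch (not in the original source): the canonical way a TB
   comes to life is the sequence used by tb_gen_code() above --

       tb = tb_alloc(pc);
       if (!tb) {                     // TB table or code buffer full
           tb_flush(env);             // drop everything and retry
           tb = tb_alloc(pc);         // cannot fail after a flush
       }
       // ... fill tb in and call cpu_gen_code() ...
       tb_link_phys(tb, phys_pc, phys_page2);

   tb_link_phys() must come last: once the TB is in tb_phys_hash it is
   visible to lookups, so it has to be fully generated first. */
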
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

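/* Illustrative note (not in the original source): the binary search works
   because tbs[] is filled in code-buffer order, so tc_ptr values increase
   monotonically.  A host PC taken inside generated code (e.g. from a signal
   handler during a self-modifying-code write) therefore maps back to its TB:
   with blocks at tc_ptr 0x1000, 0x1400 and 0x1900, a fault at 0x15f3 falls
   through to `return &tbs[m_max]`, the block starting at 0x1400. */
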
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

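/* Illustrative usage (not in the original source): a front end would
   typically wire these up from command-line options, e.g.

       cpu_set_log_filename("/tmp/qemu.log");
       mask = cpu_str_to_log_mask("in_asm,cpu");   // parser defined below
       if (mask)
           cpu_set_log(mask);

   cpu_set_log() opens the log file lazily on the first nonzero mask. */
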
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

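/* Illustrative example (not in the original source): parsing "in_asm,exec"
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC; "all" ORs in every entry of
   cpu_log_items[]; an unknown name such as "in_asm,bogus" makes the whole
   call return 0, which callers treat as a parse error. */
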
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

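/* Illustrative note (not in the original source): the software TLB is
   direct-mapped, indexed by the low bits of the virtual page number:

       i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);

   Assuming TARGET_PAGE_BITS = 12 and CPU_TLB_SIZE = 256, vaddr 0xc0103abc
   gives page number 0xc0103 and slot 0x03, so only that one entry (per
   read/write and user/kernel set) needs flushing for the page. */
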
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

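/* Illustrative note (not in the original source): phys_ram_dirty holds one
   byte of dirty flags per target RAM page.  Clearing flags here re-arms the
   slow write path: tlb_reset_dirty_range() above retags matching TLB entries
   as IO_MEM_NOTDIRTY, so the next guest store to such a page is routed to
   the notdirty_mem_write* handlers further down, which set the flags again
   and (once all flags read 0xff) switch the entry back to plain RAM. */
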
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

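/* Illustrative usage (not in the original source; 'ram_size' is a
   hypothetical variable): a board model would typically map guest RAM as

       cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);

   i.e. phys_offset is an offset inside phys_ram_base for RAM, whereas an
   MMIO range passes the token returned by cpu_register_io_memory() (see
   below) as phys_offset. */
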
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};


static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
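
/* Illustrative note (not part of the original): phys_ram_dirty keeps
   one flag byte per RAM page. CODE_DIRTY_FLAG clear means translated
   blocks may still depend on the page, so a write must first
   invalidate them, as the three handlers above do. A minimal predicate
   using only names defined in this file: */
#if 0
static int example_page_may_hold_translated_code(unsigned long ram_addr)
{
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}
#endif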

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    /* the first entries are reserved for the fixed I/O zones
       registered above */
    io_mem_nb = 5;

    /* allocate the dirty bits array (one byte per target page) */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All three functions must be supplied. If io_index is non-zero,
   the corresponding I/O zone is modified; if it is zero, a new I/O
   zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
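
/* Illustrative sketch (hypothetical device, not part of the original):
   registering a trivial I/O region and mapping it into the physical
   address space. "mydev" and its handlers are invented names used only
   for this example. */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0; /* hypothetical register value */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to the guest write here */
}

/* the same handler is reused for all three access sizes for brevity */
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readl, mydev_readl, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writel, mydev_writel, mydev_writel,
};

static void mydev_map(target_phys_addr_t base)
{
    int io;
    /* io_index 0 asks for a fresh entry; the result already contains
       the IO_MEM_SHIFT-ed index expected by the physical page table */
    io = cpu_register_io_memory(0, mydev_read, mydev_write, NULL);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif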

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the bytes that fit in the current page:
               passing 'len' here was a bug, as it could overrun the
               page whose flags were checked above */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
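
/* Illustrative sketch (not part of the original): callers normally go
   through the cpu_physical_memory_read/write wrappers (used by
   ldub_phys & co. below), e.g. for device DMA; reads and writes cross
   page boundaries and I/O regions transparently. */
#if 0
static void example_dma_read(target_phys_addr_t src, uint8_t *dst, int len)
{
    cpu_physical_memory_read(src, dst, len);
}
#endif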

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
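
/* Illustrative sketch (hypothetical, not part of the original): a
   target MMU walker updating the accessed/dirty bits of a page table
   entry. Using stl_phys_notdirty() keeps the dirty bitmap usable for
   tracking guest PTE modifications, as the comment above describes.
   PTE_A and PTE_D are invented names for this example. */
#if 0
#define PTE_A 0x20
#define PTE_D 0x40

static void example_update_pte(target_phys_addr_t pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);
    pte |= PTE_A;
    if (is_write)
        pte |= PTE_D;
    stl_phys_notdirty(pte_addr, pte);
}
#endif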

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
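
/* Illustrative sketch (not part of the original): how a debugger stub
   might use cpu_memory_rw_debug() to read guest virtual memory; unlike
   a normal access it returns -1 for an unmapped page instead of
   raising a fault. */
#if 0
static int example_read_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif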

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
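
/* Illustrative usage (not part of the original): fprintf matches the
   expected callback signature, so the statistics can be sent to any
   stdio stream. */
#if 0
static void example_dump_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif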

#if !defined(CONFIG_USER_ONLY)

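/* The includes below instantiate the code-access softmmu helpers
   (SOFTMMU_CODE_ACCESS, with the _cmmu suffix) once per access size:
   SHIFT n produces the (1 << n)-byte variant, i.e. 1, 2, 4 and 8
   bytes. */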
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif