1/*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include "config.h"
21#ifdef _WIN32
22#include <windows.h>
23#else
24#include <sys/types.h>
25#include <sys/mman.h>
26#endif
27#include <stdlib.h>
28#include <stdio.h>
29#include <stdarg.h>
30#include <string.h>
31#include <errno.h>
32#include <unistd.h>
33#include <inttypes.h>
34
35#include "cpu.h"
36#include "exec-all.h"
37
38//#define DEBUG_TB_INVALIDATE
39//#define DEBUG_FLUSH
40//#define DEBUG_TLB
41
42/* make various TB consistency checks */
43//#define DEBUG_TB_CHECK
44//#define DEBUG_TLB_CHECK
45
46/* threshold to flush the translated code buffer */
47#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48
49#define SMC_BITMAP_USE_THRESHOLD 10
50
51#define MMAP_AREA_START 0x00000000
52#define MMAP_AREA_END 0xa8000000
53
54#if defined(TARGET_SPARC64)
55#define TARGET_PHYS_ADDR_SPACE_BITS 41
56#elif defined(TARGET_PPC64)
57#define TARGET_PHYS_ADDR_SPACE_BITS 42
58#else
59/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60#define TARGET_PHYS_ADDR_SPACE_BITS 32
61#endif
62
63TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
64TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
65int nb_tbs;
66/* any access to the tbs or the page table must use this lock */
67spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
68
69uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
70uint8_t *code_gen_ptr;
71
72int phys_ram_size;
73int phys_ram_fd;
74uint8_t *phys_ram_base;
75uint8_t *phys_ram_dirty;
76
77CPUState *first_cpu;
78/* current CPU in the current thread. It is only valid inside
79 cpu_exec() */
80CPUState *cpu_single_env;
81
82typedef struct PageDesc {
83 /* list of TBs intersecting this ram page */
84 TranslationBlock *first_tb;
85 /* in order to optimize self modifying code, we count the number of
86 write accesses to a given page before switching to a bitmap */
87 unsigned int code_write_count;
88 uint8_t *code_bitmap;
89#if defined(CONFIG_USER_ONLY)
90 unsigned long flags;
91#endif
92} PageDesc;
93
94typedef struct PhysPageDesc {
95 /* offset in host memory of the page + io_index in the low 12 bits */
96 uint32_t phys_offset;
97} PhysPageDesc;
98
99#define L2_BITS 10
100#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
101
102#define L1_SIZE (1 << L1_BITS)
103#define L2_SIZE (1 << L2_BITS)
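/* Worked example (illustrative only): with a 4KB target page
 * (TARGET_PAGE_BITS == 12), L1_BITS == L2_BITS == 10, so a 32 bit
 * virtual address is resolved in two steps, as page_find() does below:
 *
 *   index = address >> TARGET_PAGE_BITS;      page number
 *   p = l1_map[index >> L2_BITS];             first level, 1024 entries
 *   desc = p + (index & (L2_SIZE - 1));       second level, 1024 PageDescs
 */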
104
105static void io_mem_init(void);
106
107unsigned long qemu_real_host_page_size;
108unsigned long qemu_host_page_bits;
109unsigned long qemu_host_page_size;
110unsigned long qemu_host_page_mask;
111
112/* XXX: for system emulation, it could just be an array */
113static PageDesc *l1_map[L1_SIZE];
114PhysPageDesc **l1_phys_map;
115
116/* io memory support */
117CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
118CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
119void *io_mem_opaque[IO_MEM_NB_ENTRIES];
120static int io_mem_nb;
121
122/* log support */
123char *logfilename = "/tmp/qemu.log";
124FILE *logfile;
125int loglevel;
126
127/* statistics */
128static int tlb_flush_count;
129static int tb_flush_count;
130static int tb_phys_invalidate_count;
131
132static void page_init(void)
133{
134 /* NOTE: we can always suppose that qemu_host_page_size >=
135 TARGET_PAGE_SIZE */
136#ifdef _WIN32
137 {
138 SYSTEM_INFO system_info;
139 DWORD old_protect;
140
141 GetSystemInfo(&system_info);
142 qemu_real_host_page_size = system_info.dwPageSize;
143
144 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
145 PAGE_EXECUTE_READWRITE, &old_protect);
146 }
147#else
148 qemu_real_host_page_size = getpagesize();
149 {
150 unsigned long start, end;
151
152 start = (unsigned long)code_gen_buffer;
153 start &= ~(qemu_real_host_page_size - 1);
154
155 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
156 end += qemu_real_host_page_size - 1;
157 end &= ~(qemu_real_host_page_size - 1);
158
159 mprotect((void *)start, end - start,
160 PROT_READ | PROT_WRITE | PROT_EXEC);
161 }
162#endif
163
164 if (qemu_host_page_size == 0)
165 qemu_host_page_size = qemu_real_host_page_size;
166 if (qemu_host_page_size < TARGET_PAGE_SIZE)
167 qemu_host_page_size = TARGET_PAGE_SIZE;
168 qemu_host_page_bits = 0;
169 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
170 qemu_host_page_bits++;
171 qemu_host_page_mask = ~(qemu_host_page_size - 1);
172 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
173 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
174}
175
176static inline PageDesc *page_find_alloc(unsigned int index)
177{
178 PageDesc **lp, *p;
179
180 lp = &l1_map[index >> L2_BITS];
181 p = *lp;
182 if (!p) {
183 /* allocate if not found */
184 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
185 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
186 *lp = p;
187 }
188 return p + (index & (L2_SIZE - 1));
189}
190
191static inline PageDesc *page_find(unsigned int index)
192{
193 PageDesc *p;
194
195 p = l1_map[index >> L2_BITS];
196 if (!p)
197 return 0;
198 return p + (index & (L2_SIZE - 1));
199}
200
201static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
202{
203 void **lp, **p;
204
205 p = (void **)l1_phys_map;
206#if TARGET_PHYS_ADDR_SPACE_BITS > 32
207
208#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
209#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
210#endif
211 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
212 p = *lp;
213 if (!p) {
214 /* allocate if not found */
215 if (!alloc)
216 return NULL;
217 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
218 memset(p, 0, sizeof(void *) * L1_SIZE);
219 *lp = p;
220 }
221#endif
222 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
223 p = *lp;
224 if (!p) {
225 /* allocate if not found */
226 if (!alloc)
227 return NULL;
228 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
229 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
230 *lp = p;
231 }
232 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
233}
234
235static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
236{
237 return phys_page_find_alloc(index, 0);
238}
239
240#if !defined(CONFIG_USER_ONLY)
241static void tlb_protect_code(ram_addr_t ram_addr);
242static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
243 target_ulong vaddr);
244#endif
245
246void cpu_exec_init(CPUState *env)
247{
248 CPUState **penv;
249 int cpu_index;
250
251 if (!code_gen_ptr) {
252 code_gen_ptr = code_gen_buffer;
253 page_init();
254 io_mem_init();
255 }
256 env->next_cpu = NULL;
257 penv = &first_cpu;
258 cpu_index = 0;
259 while (*penv != NULL) {
260 penv = (CPUState **)&(*penv)->next_cpu;
261 cpu_index++;
262 }
263 env->cpu_index = cpu_index;
264 *penv = env;
265}
266
267static inline void invalidate_page_bitmap(PageDesc *p)
268{
269 if (p->code_bitmap) {
270 qemu_free(p->code_bitmap);
271 p->code_bitmap = NULL;
272 }
273 p->code_write_count = 0;
274}
275
276/* set to NULL all the 'first_tb' fields in all PageDescs */
277static void page_flush_tb(void)
278{
279 int i, j;
280 PageDesc *p;
281
282 for(i = 0; i < L1_SIZE; i++) {
283 p = l1_map[i];
284 if (p) {
285 for(j = 0; j < L2_SIZE; j++) {
286 p->first_tb = NULL;
287 invalidate_page_bitmap(p);
288 p++;
289 }
290 }
291 }
292}
293
294/* flush all the translation blocks */
295/* XXX: tb_flush is currently not thread safe */
296void tb_flush(CPUState *env1)
297{
298 CPUState *env;
299#if defined(DEBUG_FLUSH)
300 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
301 code_gen_ptr - code_gen_buffer,
302 nb_tbs,
303 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
304#endif
305 nb_tbs = 0;
306
307 for(env = first_cpu; env != NULL; env = env->next_cpu) {
308 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
309 }
310
311 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
312 page_flush_tb();
313
314 code_gen_ptr = code_gen_buffer;
315 /* XXX: flush processor icache at this point if cache flush is
316 expensive */
317 tb_flush_count++;
318}
319
320#ifdef DEBUG_TB_CHECK
321
322static void tb_invalidate_check(unsigned long address)
323{
324 TranslationBlock *tb;
325 int i;
326 address &= TARGET_PAGE_MASK;
327 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
328 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
329 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
330 address >= tb->pc + tb->size)) {
331 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
332 address, tb->pc, tb->size);
333 }
334 }
335 }
336}
337
338/* verify that all the pages have correct rights for code */
339static void tb_page_check(void)
340{
341 TranslationBlock *tb;
342 int i, flags1, flags2;
343
344 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
345 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
346 flags1 = page_get_flags(tb->pc);
347 flags2 = page_get_flags(tb->pc + tb->size - 1);
348 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
349 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
350 tb->pc, tb->size, flags1, flags2);
351 }
352 }
353 }
354}
355
356void tb_jmp_check(TranslationBlock *tb)
357{
358 TranslationBlock *tb1;
359 unsigned int n1;
360
361 /* suppress any remaining jumps to this TB */
362 tb1 = tb->jmp_first;
363 for(;;) {
364 n1 = (long)tb1 & 3;
365 tb1 = (TranslationBlock *)((long)tb1 & ~3);
366 if (n1 == 2)
367 break;
368 tb1 = tb1->jmp_next[n1];
369 }
370 /* check end of list */
371 if (tb1 != tb) {
372 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
373 }
374}
375
376#endif
377
378/* invalidate one TB */
379static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
380 int next_offset)
381{
382 TranslationBlock *tb1;
383 for(;;) {
384 tb1 = *ptb;
385 if (tb1 == tb) {
386 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
387 break;
388 }
389 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
390 }
391}
392
393static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
394{
395 TranslationBlock *tb1;
396 unsigned int n1;
397
398 for(;;) {
399 tb1 = *ptb;
400 n1 = (long)tb1 & 3;
401 tb1 = (TranslationBlock *)((long)tb1 & ~3);
402 if (tb1 == tb) {
403 *ptb = tb1->page_next[n1];
404 break;
405 }
406 ptb = &tb1->page_next[n1];
407 }
408}
409
410static inline void tb_jmp_remove(TranslationBlock *tb, int n)
411{
412 TranslationBlock *tb1, **ptb;
413 unsigned int n1;
414
415 ptb = &tb->jmp_next[n];
416 tb1 = *ptb;
417 if (tb1) {
418 /* find tb(n) in circular list */
419 for(;;) {
420 tb1 = *ptb;
421 n1 = (long)tb1 & 3;
422 tb1 = (TranslationBlock *)((long)tb1 & ~3);
423 if (n1 == n && tb1 == tb)
424 break;
425 if (n1 == 2) {
426 ptb = &tb1->jmp_first;
427 } else {
428 ptb = &tb1->jmp_next[n1];
429 }
430 }
431 /* now we can suppress tb(n) from the list */
432 *ptb = tb->jmp_next[n];
433
434 tb->jmp_next[n] = NULL;
435 }
436}
437
438/* reset the jump entry 'n' of a TB so that it is not chained to
439 another TB */
440static inline void tb_reset_jump(TranslationBlock *tb, int n)
441{
442 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
443}
444
445static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
446{
447 CPUState *env;
448 PageDesc *p;
449 unsigned int h, n1;
450 target_ulong phys_pc;
451 TranslationBlock *tb1, *tb2;
452
453 /* remove the TB from the hash list */
454 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
455 h = tb_phys_hash_func(phys_pc);
456 tb_remove(&tb_phys_hash[h], tb,
457 offsetof(TranslationBlock, phys_hash_next));
458
459 /* remove the TB from the page list */
460 if (tb->page_addr[0] != page_addr) {
461 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
462 tb_page_remove(&p->first_tb, tb);
463 invalidate_page_bitmap(p);
464 }
465 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
466 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
467 tb_page_remove(&p->first_tb, tb);
468 invalidate_page_bitmap(p);
469 }
470
471 tb_invalidated_flag = 1;
472
473 /* remove the TB from the hash list */
474 h = tb_jmp_cache_hash_func(tb->pc);
475 for(env = first_cpu; env != NULL; env = env->next_cpu) {
476 if (env->tb_jmp_cache[h] == tb)
477 env->tb_jmp_cache[h] = NULL;
478 }
479
480 /* suppress this TB from the two jump lists */
481 tb_jmp_remove(tb, 0);
482 tb_jmp_remove(tb, 1);
483
484 /* suppress any remaining jumps to this TB */
485 tb1 = tb->jmp_first;
486 for(;;) {
487 n1 = (long)tb1 & 3;
488 if (n1 == 2)
489 break;
490 tb1 = (TranslationBlock *)((long)tb1 & ~3);
491 tb2 = tb1->jmp_next[n1];
492 tb_reset_jump(tb1, n1);
493 tb1->jmp_next[n1] = NULL;
494 tb1 = tb2;
495 }
496 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
497
498 tb_phys_invalidate_count++;
499}
500
501static inline void set_bits(uint8_t *tab, int start, int len)
502{
503 int end, mask, end1;
504
505 end = start + len;
506 tab += start >> 3;
507 mask = 0xff << (start & 7);
508 if ((start & ~7) == (end & ~7)) {
509 if (start < end) {
510 mask &= ~(0xff << (end & 7));
511 *tab |= mask;
512 }
513 } else {
514 *tab++ |= mask;
515 start = (start + 8) & ~7;
516 end1 = end & ~7;
517 while (start < end1) {
518 *tab++ = 0xff;
519 start += 8;
520 }
521 if (start < end) {
522 mask = ~(0xff << (end & 7));
523 *tab |= mask;
524 }
525 }
526}
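/* Example (illustrative): set_bits() marks 'len' bits starting at bit
 * 'start' in a byte array, e.g.
 *
 *   set_bits(bitmap, 5, 10);
 *
 * sets bits 5..14, i.e. bitmap[0] |= 0xe0 and bitmap[1] |= 0x7f.  It is
 * used by build_page_bitmap() below to record which bytes of a page are
 * covered by translated code. */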
527
528static void build_page_bitmap(PageDesc *p)
529{
530 int n, tb_start, tb_end;
531 TranslationBlock *tb;
532
533 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
534 if (!p->code_bitmap)
535 return;
536 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
537
538 tb = p->first_tb;
539 while (tb != NULL) {
540 n = (long)tb & 3;
541 tb = (TranslationBlock *)((long)tb & ~3);
542 /* NOTE: this is subtle as a TB may span two physical pages */
543 if (n == 0) {
544 /* NOTE: tb_end may be after the end of the page, but
545 it is not a problem */
546 tb_start = tb->pc & ~TARGET_PAGE_MASK;
547 tb_end = tb_start + tb->size;
548 if (tb_end > TARGET_PAGE_SIZE)
549 tb_end = TARGET_PAGE_SIZE;
550 } else {
551 tb_start = 0;
552 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
553 }
554 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
555 tb = tb->page_next[n];
556 }
557}
558
559#ifdef TARGET_HAS_PRECISE_SMC
560
561static void tb_gen_code(CPUState *env,
562 target_ulong pc, target_ulong cs_base, int flags,
563 int cflags)
564{
565 TranslationBlock *tb;
566 uint8_t *tc_ptr;
567 target_ulong phys_pc, phys_page2, virt_page2;
568 int code_gen_size;
569
570 phys_pc = get_phys_addr_code(env, pc);
571 tb = tb_alloc(pc);
572 if (!tb) {
573 /* flush must be done */
574 tb_flush(env);
575 /* cannot fail at this point */
576 tb = tb_alloc(pc);
577 }
578 tc_ptr = code_gen_ptr;
579 tb->tc_ptr = tc_ptr;
580 tb->cs_base = cs_base;
581 tb->flags = flags;
582 tb->cflags = cflags;
583 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
584 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
585
586 /* check next page if needed */
587 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
588 phys_page2 = -1;
589 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
590 phys_page2 = get_phys_addr_code(env, virt_page2);
591 }
592 tb_link_phys(tb, phys_pc, phys_page2);
593}
594#endif
595
596/* invalidate all TBs which intersect with the target physical page
597 in the range [start, end). NOTE: start and end must refer to
598 the same physical page. 'is_cpu_write_access' should be true if called
599 from a real cpu write access: the virtual CPU will exit the current
600 TB if code is modified inside this TB. */
601void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
602 int is_cpu_write_access)
603{
604 int n, current_tb_modified, current_tb_not_found, current_flags;
605 CPUState *env = cpu_single_env;
606 PageDesc *p;
607 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
608 target_ulong tb_start, tb_end;
609 target_ulong current_pc, current_cs_base;
610
611 p = page_find(start >> TARGET_PAGE_BITS);
612 if (!p)
613 return;
614 if (!p->code_bitmap &&
615 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
616 is_cpu_write_access) {
617 /* build code bitmap */
618 build_page_bitmap(p);
619 }
620
621 /* we remove all the TBs in the range [start, end) */
622 /* XXX: see if in some cases it could be faster to invalidate all the code */
623 current_tb_not_found = is_cpu_write_access;
624 current_tb_modified = 0;
625 current_tb = NULL; /* avoid warning */
626 current_pc = 0; /* avoid warning */
627 current_cs_base = 0; /* avoid warning */
628 current_flags = 0; /* avoid warning */
629 tb = p->first_tb;
630 while (tb != NULL) {
631 n = (long)tb & 3;
632 tb = (TranslationBlock *)((long)tb & ~3);
633 tb_next = tb->page_next[n];
634 /* NOTE: this is subtle as a TB may span two physical pages */
635 if (n == 0) {
636 /* NOTE: tb_end may be after the end of the page, but
637 it is not a problem */
638 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
639 tb_end = tb_start + tb->size;
640 } else {
641 tb_start = tb->page_addr[1];
642 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
643 }
644 if (!(tb_end <= start || tb_start >= end)) {
645#ifdef TARGET_HAS_PRECISE_SMC
646 if (current_tb_not_found) {
647 current_tb_not_found = 0;
648 current_tb = NULL;
649 if (env->mem_write_pc) {
650 /* now we have a real cpu fault */
651 current_tb = tb_find_pc(env->mem_write_pc);
652 }
653 }
654 if (current_tb == tb &&
655 !(current_tb->cflags & CF_SINGLE_INSN)) {
656 /* If we are modifying the current TB, we must stop
657 its execution. We could be more precise by checking
658 that the modification is after the current PC, but it
659 would require a specialized function to partially
660 restore the CPU state */
661
662 current_tb_modified = 1;
663 cpu_restore_state(current_tb, env,
664 env->mem_write_pc, NULL);
665#if defined(TARGET_I386)
666 current_flags = env->hflags;
667 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
668 current_cs_base = (target_ulong)env->segs[R_CS].base;
669 current_pc = current_cs_base + env->eip;
670#else
671#error unsupported CPU
672#endif
673 }
674#endif /* TARGET_HAS_PRECISE_SMC */
675 saved_tb = env->current_tb;
676 env->current_tb = NULL;
677 tb_phys_invalidate(tb, -1);
678 env->current_tb = saved_tb;
679 if (env->interrupt_request && env->current_tb)
680 cpu_interrupt(env, env->interrupt_request);
681 }
682 tb = tb_next;
683 }
684#if !defined(CONFIG_USER_ONLY)
685 /* if no code remaining, no need to continue to use slow writes */
686 if (!p->first_tb) {
687 invalidate_page_bitmap(p);
688 if (is_cpu_write_access) {
689 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
690 }
691 }
692#endif
693#ifdef TARGET_HAS_PRECISE_SMC
694 if (current_tb_modified) {
695 /* we generate a block containing just the instruction
696 modifying the memory. It will ensure that it cannot modify
697 itself */
698 env->current_tb = NULL;
699 tb_gen_code(env, current_pc, current_cs_base, current_flags,
700 CF_SINGLE_INSN);
701 cpu_resume_from_signal(env, NULL);
702 }
703#endif
704}
705
706/* len must be <= 8 and start must be a multiple of len */
707static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
708{
709 PageDesc *p;
710 int offset, b;
711#if 0
712 if (1) {
713 if (loglevel) {
714 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
715 cpu_single_env->mem_write_vaddr, len,
716 cpu_single_env->eip,
717 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
718 }
719 }
720#endif
721 p = page_find(start >> TARGET_PAGE_BITS);
722 if (!p)
723 return;
724 if (p->code_bitmap) {
725 offset = start & ~TARGET_PAGE_MASK;
726 b = p->code_bitmap[offset >> 3] >> (offset & 7);
727 if (b & ((1 << len) - 1))
728 goto do_invalidate;
729 } else {
730 do_invalidate:
731 tb_invalidate_phys_page_range(start, start + len, 1);
732 }
733}
734
735#if !defined(CONFIG_SOFTMMU)
736static void tb_invalidate_phys_page(target_ulong addr,
737 unsigned long pc, void *puc)
738{
739 int n, current_flags, current_tb_modified;
740 target_ulong current_pc, current_cs_base;
741 PageDesc *p;
742 TranslationBlock *tb, *current_tb;
743#ifdef TARGET_HAS_PRECISE_SMC
744 CPUState *env = cpu_single_env;
745#endif
746
747 addr &= TARGET_PAGE_MASK;
748 p = page_find(addr >> TARGET_PAGE_BITS);
749 if (!p)
750 return;
751 tb = p->first_tb;
752 current_tb_modified = 0;
753 current_tb = NULL;
754 current_pc = 0; /* avoid warning */
755 current_cs_base = 0; /* avoid warning */
756 current_flags = 0; /* avoid warning */
757#ifdef TARGET_HAS_PRECISE_SMC
758 if (tb && pc != 0) {
759 current_tb = tb_find_pc(pc);
760 }
761#endif
762 while (tb != NULL) {
763 n = (long)tb & 3;
764 tb = (TranslationBlock *)((long)tb & ~3);
765#ifdef TARGET_HAS_PRECISE_SMC
766 if (current_tb == tb &&
767 !(current_tb->cflags & CF_SINGLE_INSN)) {
768 /* If we are modifying the current TB, we must stop
769 its execution. We could be more precise by checking
770 that the modification is after the current PC, but it
771 would require a specialized function to partially
772 restore the CPU state */
773
774 current_tb_modified = 1;
775 cpu_restore_state(current_tb, env, pc, puc);
776#if defined(TARGET_I386)
777 current_flags = env->hflags;
778 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
779 current_cs_base = (target_ulong)env->segs[R_CS].base;
780 current_pc = current_cs_base + env->eip;
781#else
782#error unsupported CPU
783#endif
784 }
785#endif /* TARGET_HAS_PRECISE_SMC */
786 tb_phys_invalidate(tb, addr);
787 tb = tb->page_next[n];
788 }
789 p->first_tb = NULL;
790#ifdef TARGET_HAS_PRECISE_SMC
791 if (current_tb_modified) {
792 /* we generate a block containing just the instruction
793 modifying the memory. It will ensure that it cannot modify
794 itself */
795 env->current_tb = NULL;
796 tb_gen_code(env, current_pc, current_cs_base, current_flags,
797 CF_SINGLE_INSN);
798 cpu_resume_from_signal(env, puc);
799 }
800#endif
801}
802#endif
803
804/* add the tb in the target page and protect it if necessary */
805static inline void tb_alloc_page(TranslationBlock *tb,
806 unsigned int n, unsigned int page_addr)
807{
808 PageDesc *p;
809 TranslationBlock *last_first_tb;
810
811 tb->page_addr[n] = page_addr;
812 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
813 tb->page_next[n] = p->first_tb;
814 last_first_tb = p->first_tb;
815 p->first_tb = (TranslationBlock *)((long)tb | n);
816 invalidate_page_bitmap(p);
817
818#if defined(TARGET_HAS_SMC) || 1
819
820#if defined(CONFIG_USER_ONLY)
821 if (p->flags & PAGE_WRITE) {
822 unsigned long host_start, host_end, addr;
823 int prot;
824
825 /* force the host page to be non-writable (writes will incur a
826 page fault + mprotect overhead) */
827 host_start = page_addr & qemu_host_page_mask;
828 host_end = host_start + qemu_host_page_size;
829 prot = 0;
830 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
831 prot |= page_get_flags(addr);
832 mprotect((void *)host_start, qemu_host_page_size,
833 (prot & PAGE_BITS) & ~PAGE_WRITE);
834#ifdef DEBUG_TB_INVALIDATE
835 printf("protecting code page: 0x%08lx\n",
836 host_start);
837#endif
838 p->flags &= ~PAGE_WRITE;
839 }
840#else
841 /* if some code is already present, then the pages are already
842 protected. So we handle the case where only the first TB is
843 allocated in a physical page */
844 if (!last_first_tb) {
845 tlb_protect_code(page_addr);
846 }
847#endif
848
849#endif /* TARGET_HAS_SMC */
850}
851
852/* Allocate a new translation block. Flush the translation buffer if
853 too many translation blocks or too much generated code. */
854TranslationBlock *tb_alloc(target_ulong pc)
855{
856 TranslationBlock *tb;
857
858 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
859 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
860 return NULL;
861 tb = &tbs[nb_tbs++];
862 tb->pc = pc;
863 tb->cflags = 0;
864 return tb;
865}
866
867/* add a new TB and link it to the physical page tables. phys_page2 is
868 (-1) to indicate that only one page contains the TB. */
869void tb_link_phys(TranslationBlock *tb,
870 target_ulong phys_pc, target_ulong phys_page2)
871{
872 unsigned int h;
873 TranslationBlock **ptb;
874
875 /* add in the physical hash table */
876 h = tb_phys_hash_func(phys_pc);
877 ptb = &tb_phys_hash[h];
878 tb->phys_hash_next = *ptb;
879 *ptb = tb;
880
881 /* add in the page list */
882 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
883 if (phys_page2 != -1)
884 tb_alloc_page(tb, 1, phys_page2);
885 else
886 tb->page_addr[1] = -1;
887
888 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
889 tb->jmp_next[0] = NULL;
890 tb->jmp_next[1] = NULL;
891#ifdef USE_CODE_COPY
892 tb->cflags &= ~CF_FP_USED;
893 if (tb->cflags & CF_TB_FP_USED)
894 tb->cflags |= CF_FP_USED;
895#endif
896
897 /* init original jump addresses */
898 if (tb->tb_next_offset[0] != 0xffff)
899 tb_reset_jump(tb, 0);
900 if (tb->tb_next_offset[1] != 0xffff)
901 tb_reset_jump(tb, 1);
902
903#ifdef DEBUG_TB_CHECK
904 tb_page_check();
905#endif
906}
907
908/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
909 tb[1].tc_ptr. Return NULL if not found */
910TranslationBlock *tb_find_pc(unsigned long tc_ptr)
911{
912 int m_min, m_max, m;
913 unsigned long v;
914 TranslationBlock *tb;
915
916 if (nb_tbs <= 0)
917 return NULL;
918 if (tc_ptr < (unsigned long)code_gen_buffer ||
919 tc_ptr >= (unsigned long)code_gen_ptr)
920 return NULL;
921 /* binary search (cf Knuth) */
922 m_min = 0;
923 m_max = nb_tbs - 1;
924 while (m_min <= m_max) {
925 m = (m_min + m_max) >> 1;
926 tb = &tbs[m];
927 v = (unsigned long)tb->tc_ptr;
928 if (v == tc_ptr)
929 return tb;
930 else if (tc_ptr < v) {
931 m_max = m - 1;
932 } else {
933 m_min = m + 1;
934 }
935 }
936 return &tbs[m_max];
937}
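/* Usage sketch (mirroring the callers elsewhere in this file): the host
 * PC captured in a write-fault handler is mapped back to the TB whose
 * generated code was running, so the guest CPU state can be rebuilt:
 *
 *   tb = tb_find_pc(env->mem_write_pc);
 *   if (tb)
 *       cpu_restore_state(tb, env, env->mem_write_pc, NULL);
 */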
938
939static void tb_reset_jump_recursive(TranslationBlock *tb);
940
941static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
942{
943 TranslationBlock *tb1, *tb_next, **ptb;
944 unsigned int n1;
945
946 tb1 = tb->jmp_next[n];
947 if (tb1 != NULL) {
948 /* find head of list */
949 for(;;) {
950 n1 = (long)tb1 & 3;
951 tb1 = (TranslationBlock *)((long)tb1 & ~3);
952 if (n1 == 2)
953 break;
954 tb1 = tb1->jmp_next[n1];
955 }
956 /* we are now sure that tb jumps to tb1 */
957 tb_next = tb1;
958
959 /* remove tb from the jmp_first list */
960 ptb = &tb_next->jmp_first;
961 for(;;) {
962 tb1 = *ptb;
963 n1 = (long)tb1 & 3;
964 tb1 = (TranslationBlock *)((long)tb1 & ~3);
965 if (n1 == n && tb1 == tb)
966 break;
967 ptb = &tb1->jmp_next[n1];
968 }
969 *ptb = tb->jmp_next[n];
970 tb->jmp_next[n] = NULL;
971
972 /* suppress the jump to next tb in generated code */
973 tb_reset_jump(tb, n);
974
975 /* suppress jumps in the tb on which we could have jumped */
976 tb_reset_jump_recursive(tb_next);
977 }
978}
979
980static void tb_reset_jump_recursive(TranslationBlock *tb)
981{
982 tb_reset_jump_recursive2(tb, 0);
983 tb_reset_jump_recursive2(tb, 1);
984}
985
986#if defined(TARGET_HAS_ICE)
987static void breakpoint_invalidate(CPUState *env, target_ulong pc)
988{
989 target_ulong phys_addr;
990
991 phys_addr = cpu_get_phys_page_debug(env, pc);
992 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
993}
994#endif
995
996/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
997 breakpoint is reached */
998int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
999{
1000#if defined(TARGET_HAS_ICE)
1001 int i;
1002
1003 for(i = 0; i < env->nb_breakpoints; i++) {
1004 if (env->breakpoints[i] == pc)
1005 return 0;
1006 }
1007
1008 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1009 return -1;
1010 env->breakpoints[env->nb_breakpoints++] = pc;
1011
1012 breakpoint_invalidate(env, pc);
1013 return 0;
1014#else
1015 return -1;
1016#endif
1017}
1018
1019/* remove a breakpoint */
1020int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1021{
1022#if defined(TARGET_HAS_ICE)
1023 int i;
1024 for(i = 0; i < env->nb_breakpoints; i++) {
1025 if (env->breakpoints[i] == pc)
1026 goto found;
1027 }
1028 return -1;
1029 found:
1030 env->nb_breakpoints--;
1031 if (i < env->nb_breakpoints)
1032 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1033
1034 breakpoint_invalidate(env, pc);
1035 return 0;
1036#else
1037 return -1;
1038#endif
1039}
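/* Usage sketch (hypothetical debugger front end such as a gdb stub):
 * insert a breakpoint, run until the CPU loop returns EXCP_DEBUG, then
 * drop it again:
 *
 *   if (cpu_breakpoint_insert(env, pc) < 0)
 *       return -1;                table full, or no TARGET_HAS_ICE
 *   cpu_breakpoint_remove(env, pc);
 */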
1040
1041/* enable or disable single step mode. EXCP_DEBUG is returned by the
1042 CPU loop after each instruction */
1043void cpu_single_step(CPUState *env, int enabled)
1044{
1045#if defined(TARGET_HAS_ICE)
1046 if (env->singlestep_enabled != enabled) {
1047 env->singlestep_enabled = enabled;
1048 /* must flush all the translated code to avoid inconsistencies */
1049 /* XXX: only flush what is necessary */
1050 tb_flush(env);
1051 }
1052#endif
1053}
1054
1055/* enable or disable low level logging */
1056void cpu_set_log(int log_flags)
1057{
1058 loglevel = log_flags;
1059 if (loglevel && !logfile) {
1060 logfile = fopen(logfilename, "w");
1061 if (!logfile) {
1062 perror(logfilename);
1063 _exit(1);
1064 }
1065#if !defined(CONFIG_SOFTMMU)
1066 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1067 {
1068 static uint8_t logfile_buf[4096];
1069 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1070 }
1071#else
1072 setvbuf(logfile, NULL, _IOLBF, 0);
1073#endif
1074 }
1075}
1076
1077void cpu_set_log_filename(const char *filename)
1078{
1079 logfilename = strdup(filename);
1080}
1081
1082/* mask must never be zero, except for A20 change call */
1083void cpu_interrupt(CPUState *env, int mask)
1084{
1085 TranslationBlock *tb;
1086 static int interrupt_lock;
1087
1088 env->interrupt_request |= mask;
1089 /* if the cpu is currently executing code, we must unlink it and
1090 all the potentially executing TBs */
1091 tb = env->current_tb;
1092 if (tb && !testandset(&interrupt_lock)) {
1093 env->current_tb = NULL;
1094 tb_reset_jump_recursive(tb);
1095 interrupt_lock = 0;
1096 }
1097}
1098
1099void cpu_reset_interrupt(CPUState *env, int mask)
1100{
1101 env->interrupt_request &= ~mask;
1102}
1103
1104CPULogItem cpu_log_items[] = {
1105 { CPU_LOG_TB_OUT_ASM, "out_asm",
1106 "show generated host assembly code for each compiled TB" },
1107 { CPU_LOG_TB_IN_ASM, "in_asm",
1108 "show target assembly code for each compiled TB" },
1109 { CPU_LOG_TB_OP, "op",
1110 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1111#ifdef TARGET_I386
1112 { CPU_LOG_TB_OP_OPT, "op_opt",
1113 "show micro ops after optimization for each compiled TB" },
1114#endif
1115 { CPU_LOG_INT, "int",
1116 "show interrupts/exceptions in short format" },
1117 { CPU_LOG_EXEC, "exec",
1118 "show trace before each executed TB (lots of logs)" },
1119 { CPU_LOG_TB_CPU, "cpu",
1120 "show CPU state before block translation" },
1121#ifdef TARGET_I386
1122 { CPU_LOG_PCALL, "pcall",
1123 "show protected mode far calls/returns/exceptions" },
1124#endif
1125#ifdef DEBUG_IOPORT
1126 { CPU_LOG_IOPORT, "ioport",
1127 "show all I/O port accesses" },
1128#endif
1129 { 0, NULL, NULL },
1130};
1131
1132static int cmp1(const char *s1, int n, const char *s2)
1133{
1134 if (strlen(s2) != n)
1135 return 0;
1136 return memcmp(s1, s2, n) == 0;
1137}
1138
1139/* takes a comma-separated list of log masks. Returns 0 on error. */
1140int cpu_str_to_log_mask(const char *str)
1141{
1142 CPULogItem *item;
1143 int mask;
1144 const char *p, *p1;
1145
1146 p = str;
1147 mask = 0;
1148 for(;;) {
1149 p1 = strchr(p, ',');
1150 if (!p1)
1151 p1 = p + strlen(p);
1152 if(cmp1(p,p1-p,"all")) {
1153 for(item = cpu_log_items; item->mask != 0; item++) {
1154 mask |= item->mask;
1155 }
1156 } else {
1157 for(item = cpu_log_items; item->mask != 0; item++) {
1158 if (cmp1(p, p1 - p, item->name))
1159 goto found;
1160 }
1161 return 0;
1162 }
1163 found:
1164 mask |= item->mask;
1165 if (*p1 != ',')
1166 break;
1167 p = p1 + 1;
1168 }
1169 return mask;
1170}
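/* Example (illustrative): cpu_str_to_log_mask("in_asm,cpu") returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, which can then be handed to
 * cpu_set_log(); any unrecognised name makes the whole call return 0. */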
1171
1172void cpu_abort(CPUState *env, const char *fmt, ...)
1173{
1174 va_list ap;
1175
1176 va_start(ap, fmt);
1177 fprintf(stderr, "qemu: fatal: ");
1178 vfprintf(stderr, fmt, ap);
1179 fprintf(stderr, "\n");
1180#ifdef TARGET_I386
1181 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1182#else
1183 cpu_dump_state(env, stderr, fprintf, 0);
1184#endif
1185 va_end(ap);
1186 abort();
1187}
1188
1189#if !defined(CONFIG_USER_ONLY)
1190
1191/* NOTE: if flush_global is true, also flush global entries (not
1192 implemented yet) */
1193void tlb_flush(CPUState *env, int flush_global)
1194{
1195 int i;
1196
1197#if defined(DEBUG_TLB)
1198 printf("tlb_flush:\n");
1199#endif
1200 /* must reset current TB so that interrupts cannot modify the
1201 links while we are modifying them */
1202 env->current_tb = NULL;
1203
1204 for(i = 0; i < CPU_TLB_SIZE; i++) {
1205 env->tlb_read[0][i].address = -1;
1206 env->tlb_write[0][i].address = -1;
1207 env->tlb_read[1][i].address = -1;
1208 env->tlb_write[1][i].address = -1;
1209 }
1210
1211 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1212
1213#if !defined(CONFIG_SOFTMMU)
1214 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1215#endif
1216#ifdef USE_KQEMU
1217 if (env->kqemu_enabled) {
1218 kqemu_flush(env, flush_global);
1219 }
1220#endif
1221 tlb_flush_count++;
1222}
1223
1224static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1225{
1226 if (addr == (tlb_entry->address &
1227 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1228 tlb_entry->address = -1;
1229}
1230
1231void tlb_flush_page(CPUState *env, target_ulong addr)
1232{
1233 int i;
1234 TranslationBlock *tb;
1235
1236#if defined(DEBUG_TLB)
1237 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1238#endif
1239 /* must reset current TB so that interrupts cannot modify the
1240 links while we are modifying them */
1241 env->current_tb = NULL;
1242
1243 addr &= TARGET_PAGE_MASK;
1244 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1245 tlb_flush_entry(&env->tlb_read[0][i], addr);
1246 tlb_flush_entry(&env->tlb_write[0][i], addr);
1247 tlb_flush_entry(&env->tlb_read[1][i], addr);
1248 tlb_flush_entry(&env->tlb_write[1][i], addr);
1249
1250 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1251 tb = env->tb_jmp_cache[i];
1252 if (tb &&
1253 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1254 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1255 env->tb_jmp_cache[i] = NULL;
1256 }
1257 }
1258
1259#if !defined(CONFIG_SOFTMMU)
1260 if (addr < MMAP_AREA_END)
1261 munmap((void *)addr, TARGET_PAGE_SIZE);
1262#endif
1263#ifdef USE_KQEMU
1264 if (env->kqemu_enabled) {
1265 kqemu_flush_page(env, addr);
1266 }
1267#endif
1268}
1269
1270/* update the TLBs so that writes to code in the virtual page 'addr'
1271 can be detected */
1272static void tlb_protect_code(ram_addr_t ram_addr)
1273{
1274 cpu_physical_memory_reset_dirty(ram_addr,
1275 ram_addr + TARGET_PAGE_SIZE,
1276 CODE_DIRTY_FLAG);
1277}
1278
1279/* update the TLB so that writes in physical page 'phys_addr' are no longer
1280 tested for self modifying code */
1281static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1282 target_ulong vaddr)
1283{
1284 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1285}
1286
1287static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1288 unsigned long start, unsigned long length)
1289{
1290 unsigned long addr;
1291 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1292 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1293 if ((addr - start) < length) {
1294 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1295 }
1296 }
1297}
1298
1299void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1300 int dirty_flags)
1301{
1302 CPUState *env;
1303 unsigned long length, start1;
1304 int i, mask, len;
1305 uint8_t *p;
1306
1307 start &= TARGET_PAGE_MASK;
1308 end = TARGET_PAGE_ALIGN(end);
1309
1310 length = end - start;
1311 if (length == 0)
1312 return;
1313 len = length >> TARGET_PAGE_BITS;
1314#ifdef USE_KQEMU
1315 /* XXX: should not depend on cpu context */
1316 env = first_cpu;
1317 if (env->kqemu_enabled) {
1318 ram_addr_t addr;
1319 addr = start;
1320 for(i = 0; i < len; i++) {
1321 kqemu_set_notdirty(env, addr);
1322 addr += TARGET_PAGE_SIZE;
1323 }
1324 }
1325#endif
1326 mask = ~dirty_flags;
1327 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1328 for(i = 0; i < len; i++)
1329 p[i] &= mask;
1330
1331 /* we modify the TLB cache so that the dirty bit will be set again
1332 when accessing the range */
1333 start1 = start + (unsigned long)phys_ram_base;
1334 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1335 for(i = 0; i < CPU_TLB_SIZE; i++)
1336 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1337 for(i = 0; i < CPU_TLB_SIZE; i++)
1338 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1339 }
1340
1341#if !defined(CONFIG_SOFTMMU)
1342 /* XXX: this is expensive */
1343 {
1344 VirtPageDesc *p;
1345 int j;
1346 target_ulong addr;
1347
1348 for(i = 0; i < L1_SIZE; i++) {
1349 p = l1_virt_map[i];
1350 if (p) {
1351 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1352 for(j = 0; j < L2_SIZE; j++) {
1353 if (p->valid_tag == virt_valid_tag &&
1354 p->phys_addr >= start && p->phys_addr < end &&
1355 (p->prot & PROT_WRITE)) {
1356 if (addr < MMAP_AREA_END) {
1357 mprotect((void *)addr, TARGET_PAGE_SIZE,
1358 p->prot & ~PROT_WRITE);
1359 }
1360 }
1361 addr += TARGET_PAGE_SIZE;
1362 p++;
1363 }
1364 }
1365 }
1366 }
1367#endif
1368}
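/* Usage sketch: tlb_protect_code() above clears CODE_DIRTY_FLAG on one
 * page so that the next write to it traps; similarly a display emulation
 * can rearm its own tracking after a redraw (assuming a VGA_DIRTY_FLAG
 * bit as used by the VGA code):
 *
 *   cpu_physical_memory_reset_dirty(vram_start, vram_end, VGA_DIRTY_FLAG);
 */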
1369
1370static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1371{
1372 ram_addr_t ram_addr;
1373
1374 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1375 ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
1376 tlb_entry->addend - (unsigned long)phys_ram_base;
1377 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1378 tlb_entry->address |= IO_MEM_NOTDIRTY;
1379 }
1380 }
1381}
1382
1383/* update the TLB according to the current state of the dirty bits */
1384void cpu_tlb_update_dirty(CPUState *env)
1385{
1386 int i;
1387 for(i = 0; i < CPU_TLB_SIZE; i++)
1388 tlb_update_dirty(&env->tlb_write[0][i]);
1389 for(i = 0; i < CPU_TLB_SIZE; i++)
1390 tlb_update_dirty(&env->tlb_write[1][i]);
1391}
1392
1393static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1394 unsigned long start)
1395{
1396 unsigned long addr;
1397 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1398 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1399 if (addr == start) {
1400 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1401 }
1402 }
1403}
1404
1405/* update the TLB corresponding to virtual page vaddr and phys addr
1406 addr so that it is no longer dirty */
1407static inline void tlb_set_dirty(CPUState *env,
1408 unsigned long addr, target_ulong vaddr)
1409{
1410 int i;
1411
1412 addr &= TARGET_PAGE_MASK;
1413 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1414 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1415 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1416}
1417
1418/* add a new TLB entry. At most one entry for a given virtual address
1419 is permitted. Return 0 if OK or 2 if the page could not be mapped
1420 (can only happen in non SOFTMMU mode for I/O pages or pages
1421 conflicting with the host address space). */
1422int tlb_set_page(CPUState *env, target_ulong vaddr,
1423 target_phys_addr_t paddr, int prot,
1424 int is_user, int is_softmmu)
1425{
1426 PhysPageDesc *p;
1427 unsigned long pd;
1428 unsigned int index;
1429 target_ulong address;
1430 target_phys_addr_t addend;
1431 int ret;
1432
1433 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1434 if (!p) {
1435 pd = IO_MEM_UNASSIGNED;
1436 } else {
1437 pd = p->phys_offset;
1438 }
1439#if defined(DEBUG_TLB)
1440 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1441 vaddr, paddr, prot, is_user, is_softmmu, pd);
1442#endif
1443
1444 ret = 0;
1445#if !defined(CONFIG_SOFTMMU)
1446 if (is_softmmu)
1447#endif
1448 {
1449 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1450 /* IO memory case */
1451 address = vaddr | pd;
1452 addend = paddr;
1453 } else {
1454 /* standard memory */
1455 address = vaddr;
1456 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1457 }
1458
1459 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1460 addend -= vaddr;
1461 if (prot & PAGE_READ) {
1462 env->tlb_read[is_user][index].address = address;
1463 env->tlb_read[is_user][index].addend = addend;
1464 } else {
1465 env->tlb_read[is_user][index].address = -1;
1466 env->tlb_read[is_user][index].addend = -1;
1467 }
1468 if (prot & PAGE_WRITE) {
1469 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1470 /* ROM: access is ignored (same as unassigned) */
1471 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1472 env->tlb_write[is_user][index].addend = addend;
1473 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1474 !cpu_physical_memory_is_dirty(pd)) {
1475 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1476 env->tlb_write[is_user][index].addend = addend;
1477 } else {
1478 env->tlb_write[is_user][index].address = address;
1479 env->tlb_write[is_user][index].addend = addend;
1480 }
1481 } else {
1482 env->tlb_write[is_user][index].address = -1;
1483 env->tlb_write[is_user][index].addend = -1;
1484 }
1485 }
1486#if !defined(CONFIG_SOFTMMU)
1487 else {
1488 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1489 /* IO access: no mapping is done as it will be handled by the
1490 soft MMU */
1491 if (!(env->hflags & HF_SOFTMMU_MASK))
1492 ret = 2;
1493 } else {
1494 void *map_addr;
1495
1496 if (vaddr >= MMAP_AREA_END) {
1497 ret = 2;
1498 } else {
1499 if (prot & PROT_WRITE) {
1500 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1501#if defined(TARGET_HAS_SMC) || 1
1502 first_tb ||
1503#endif
1504 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1505 !cpu_physical_memory_is_dirty(pd))) {
1506 /* ROM: we do as if code was inside */
1507 /* if code is present, we only map as read only and save the
1508 original mapping */
1509 VirtPageDesc *vp;
1510
1511 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1512 vp->phys_addr = pd;
1513 vp->prot = prot;
1514 vp->valid_tag = virt_valid_tag;
1515 prot &= ~PAGE_WRITE;
1516 }
1517 }
1518 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1519 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1520 if (map_addr == MAP_FAILED) {
1521 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1522 paddr, vaddr);
1523 }
1524 }
1525 }
1526 }
1527#endif
1528 return ret;
1529}
1530
1531/* called from signal handler: invalidate the code and unprotect the
1532 page. Return TRUE if the fault was successfully handled. */
1533int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1534{
1535#if !defined(CONFIG_SOFTMMU)
1536 VirtPageDesc *vp;
1537
1538#if defined(DEBUG_TLB)
1539 printf("page_unprotect: addr=0x%08x\n", addr);
1540#endif
1541 addr &= TARGET_PAGE_MASK;
1542
1543 /* if it is not mapped, no need to worry here */
1544 if (addr >= MMAP_AREA_END)
1545 return 0;
1546 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1547 if (!vp)
1548 return 0;
1549 /* NOTE: in this case, validate_tag is _not_ tested as it
1550 validates only the code TLB */
1551 if (vp->valid_tag != virt_valid_tag)
1552 return 0;
1553 if (!(vp->prot & PAGE_WRITE))
1554 return 0;
1555#if defined(DEBUG_TLB)
1556 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1557 addr, vp->phys_addr, vp->prot);
1558#endif
1559 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1560 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1561 (unsigned long)addr, vp->prot);
1562 /* set the dirty bit */
1563 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1564 /* flush the code inside */
1565 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1566 return 1;
1567#else
1568 return 0;
1569#endif
1570}
1571
1572#else
1573
1574void tlb_flush(CPUState *env, int flush_global)
1575{
1576}
1577
1578void tlb_flush_page(CPUState *env, target_ulong addr)
1579{
1580}
1581
1582int tlb_set_page(CPUState *env, target_ulong vaddr,
1583 target_phys_addr_t paddr, int prot,
1584 int is_user, int is_softmmu)
1585{
1586 return 0;
1587}
1588
1589/* dump memory mappings */
1590void page_dump(FILE *f)
1591{
1592 unsigned long start, end;
1593 int i, j, prot, prot1;
1594 PageDesc *p;
1595
1596 fprintf(f, "%-8s %-8s %-8s %s\n",
1597 "start", "end", "size", "prot");
1598 start = -1;
1599 end = -1;
1600 prot = 0;
1601 for(i = 0; i <= L1_SIZE; i++) {
1602 if (i < L1_SIZE)
1603 p = l1_map[i];
1604 else
1605 p = NULL;
1606 for(j = 0;j < L2_SIZE; j++) {
1607 if (!p)
1608 prot1 = 0;
1609 else
1610 prot1 = p[j].flags;
1611 if (prot1 != prot) {
1612 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1613 if (start != -1) {
1614 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1615 start, end, end - start,
1616 prot & PAGE_READ ? 'r' : '-',
1617 prot & PAGE_WRITE ? 'w' : '-',
1618 prot & PAGE_EXEC ? 'x' : '-');
1619 }
1620 if (prot1 != 0)
1621 start = end;
1622 else
1623 start = -1;
1624 prot = prot1;
1625 }
1626 if (!p)
1627 break;
1628 }
1629 }
1630}
1631
1632int page_get_flags(unsigned long address)
1633{
1634 PageDesc *p;
1635
1636 p = page_find(address >> TARGET_PAGE_BITS);
1637 if (!p)
1638 return 0;
1639 return p->flags;
1640}
1641
1642/* modify the flags of a page and invalidate the code if
1643 necessary. The flag PAGE_WRITE_ORG is set automatically
1644 depending on PAGE_WRITE */
1645void page_set_flags(unsigned long start, unsigned long end, int flags)
1646{
1647 PageDesc *p;
1648 unsigned long addr;
1649
1650 start = start & TARGET_PAGE_MASK;
1651 end = TARGET_PAGE_ALIGN(end);
1652 if (flags & PAGE_WRITE)
1653 flags |= PAGE_WRITE_ORG;
1654 spin_lock(&tb_lock);
1655 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1656 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1657 /* if the write protection is set, then we invalidate the code
1658 inside */
1659 if (!(p->flags & PAGE_WRITE) &&
1660 (flags & PAGE_WRITE) &&
1661 p->first_tb) {
1662 tb_invalidate_phys_page(addr, 0, NULL);
1663 }
1664 p->flags = flags;
1665 }
1666 spin_unlock(&tb_lock);
1667}
1668
1669/* called from signal handler: invalidate the code and unprotect the
1670 page. Return TRUE if the fault was successfully handled. */
1671int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1672{
1673 unsigned int page_index, prot, pindex;
1674 PageDesc *p, *p1;
1675 unsigned long host_start, host_end, addr;
1676
1677 host_start = address & qemu_host_page_mask;
1678 page_index = host_start >> TARGET_PAGE_BITS;
1679 p1 = page_find(page_index);
1680 if (!p1)
1681 return 0;
1682 host_end = host_start + qemu_host_page_size;
1683 p = p1;
1684 prot = 0;
1685 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1686 prot |= p->flags;
1687 p++;
1688 }
1689 /* if the page was really writable, then we change its
1690 protection back to writable */
1691 if (prot & PAGE_WRITE_ORG) {
1692 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1693 if (!(p1[pindex].flags & PAGE_WRITE)) {
1694 mprotect((void *)host_start, qemu_host_page_size,
1695 (prot & PAGE_BITS) | PAGE_WRITE);
1696 p1[pindex].flags |= PAGE_WRITE;
1697 /* and since the content will be modified, we must invalidate
1698 the corresponding translated code. */
1699 tb_invalidate_phys_page(address, pc, puc);
1700#ifdef DEBUG_TB_CHECK
1701 tb_invalidate_check(address);
1702#endif
1703 return 1;
1704 }
1705 }
1706 return 0;
1707}
1708
1709/* call this function when system calls directly modify a memory area */
1710void page_unprotect_range(uint8_t *data, unsigned long data_size)
1711{
1712 unsigned long start, end, addr;
1713
1714 start = (unsigned long)data;
1715 end = start + data_size;
1716 start &= TARGET_PAGE_MASK;
1717 end = TARGET_PAGE_ALIGN(end);
1718 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1719 page_unprotect(addr, 0, NULL);
1720 }
1721}
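/* Example (illustrative): the user-mode syscall emulation calls this
 * before letting the host kernel write straight into guest memory, so
 * that write-protected code pages in the range are unprotected and
 * their TBs invalidated first, e.g. for an emulated read(2):
 *
 *   page_unprotect_range(host_buf, count);
 *
 * where host_buf/count are hypothetical names for the target buffer. */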
1722
1723static inline void tlb_set_dirty(CPUState *env,
1724 unsigned long addr, target_ulong vaddr)
1725{
1726}
1727#endif /* defined(CONFIG_USER_ONLY) */
1728
1729/* register physical memory. 'size' must be a multiple of the target
1730 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1731 I/O memory page */
1732void cpu_register_physical_memory(target_phys_addr_t start_addr,
1733 unsigned long size,
1734 unsigned long phys_offset)
1735{
1736 target_phys_addr_t addr, end_addr;
1737 PhysPageDesc *p;
1738
1739 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1740 end_addr = start_addr + size;
1741 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1742 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1743 p->phys_offset = phys_offset;
1744 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1745 phys_offset += TARGET_PAGE_SIZE;
1746 }
1747}
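/* Usage sketch (hypothetical board setup): map 16MB of RAM starting at
 * offset 0 of phys_ram_base, plus a 4KB MMIO region whose 'mmio_index'
 * is assumed to have been returned by cpu_register_io_memory() below:
 *
 *   cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024, 0);
 *   cpu_register_physical_memory(0xf0000000, 0x1000, mmio_index);
 */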
1748
1749static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1750{
1751 return 0;
1752}
1753
1754static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1755{
1756}
1757
1758static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1759 unassigned_mem_readb,
1760 unassigned_mem_readb,
1761 unassigned_mem_readb,
1762};
1763
1764static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1765 unassigned_mem_writeb,
1766 unassigned_mem_writeb,
1767 unassigned_mem_writeb,
1768};
1769
1770static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1771{
1772 unsigned long ram_addr;
1773 int dirty_flags;
1774 ram_addr = addr - (unsigned long)phys_ram_base;
1775 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1776 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1777#if !defined(CONFIG_USER_ONLY)
1778 tb_invalidate_phys_page_fast(ram_addr, 1);
1779 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1780#endif
1781 }
1782 stb_p((uint8_t *)(long)addr, val);
1783 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1784 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1785 /* we remove the notdirty callback only if the code has been
1786 flushed */
1787 if (dirty_flags == 0xff)
1788 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1789}
1790
1791static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1792{
1793 unsigned long ram_addr;
1794 int dirty_flags;
1795 ram_addr = addr - (unsigned long)phys_ram_base;
1796 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1797 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1798#if !defined(CONFIG_USER_ONLY)
1799 tb_invalidate_phys_page_fast(ram_addr, 2);
1800 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1801#endif
1802 }
1803 stw_p((uint8_t *)(long)addr, val);
1804 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1805 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1806 /* we remove the notdirty callback only if the code has been
1807 flushed */
1808 if (dirty_flags == 0xff)
1809 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1810}
1811
1812static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1813{
1814 unsigned long ram_addr;
1815 int dirty_flags;
1816 ram_addr = addr - (unsigned long)phys_ram_base;
1817 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1818 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1819#if !defined(CONFIG_USER_ONLY)
1820 tb_invalidate_phys_page_fast(ram_addr, 4);
1821 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1822#endif
1823 }
1824 stl_p((uint8_t *)(long)addr, val);
1825 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1826 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1827 /* we remove the notdirty callback only if the code has been
1828 flushed */
1829 if (dirty_flags == 0xff)
1830 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1831}
1832
1833static CPUReadMemoryFunc *error_mem_read[3] = {
1834 NULL, /* never used */
1835 NULL, /* never used */
1836 NULL, /* never used */
1837};
1838
1839static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1840 notdirty_mem_writeb,
1841 notdirty_mem_writew,
1842 notdirty_mem_writel,
1843};
1844
1845static void io_mem_init(void)
1846{
1847 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1848 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1849 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1850 io_mem_nb = 5;
1851
1852 /* alloc dirty bits array */
1853 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1854 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1855}
1856
1857/* mem_read and mem_write are arrays of functions containing the
1858 function to access byte (index 0), word (index 1) and dword (index
1859 2). All functions must be supplied. If io_index is non-zero, the
1860 corresponding I/O zone is modified. If it is zero, a new I/O zone is
1861 allocated. The return value can be used with
1862 cpu_register_physical_memory(). (-1) is returned on error. */
1863int cpu_register_io_memory(int io_index,
1864 CPUReadMemoryFunc **mem_read,
1865 CPUWriteMemoryFunc **mem_write,
1866 void *opaque)
1867{
1868 int i;
1869
1870 if (io_index <= 0) {
1871 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1872 return -1;
1873 io_index = io_mem_nb++;
1874 } else {
1875 if (io_index >= IO_MEM_NB_ENTRIES)
1876 return -1;
1877 }
1878
1879 for(i = 0;i < 3; i++) {
1880 io_mem_read[io_index][i] = mem_read[i];
1881 io_mem_write[io_index][i] = mem_write[i];
1882 }
1883 io_mem_opaque[io_index] = opaque;
1884 return io_index << IO_MEM_SHIFT;
1885}
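/* Usage sketch (hypothetical device model): the three entries of each
 * array handle byte, word and dword accesses; the returned value is the
 * phys_offset to pass to cpu_register_physical_memory() above.  The
 * mydev_* handlers and 'opaque_state' are illustrative names only:
 *
 *   static CPUReadMemoryFunc *mydev_read[3] = {
 *       mydev_readb, mydev_readw, mydev_readl,
 *   };
 *   static CPUWriteMemoryFunc *mydev_write[3] = {
 *       mydev_writeb, mydev_writew, mydev_writel,
 *   };
 *   int iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write,
 *                                          opaque_state);
 */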
1886
1887CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1888{
1889 return io_mem_write[io_index >> IO_MEM_SHIFT];
1890}
1891
1892CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1893{
1894 return io_mem_read[io_index >> IO_MEM_SHIFT];
1895}
1896
1897/* physical memory access (slow version, mainly for debug) */
1898#if defined(CONFIG_USER_ONLY)
1899void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1900 int len, int is_write)
1901{
1902 int l, flags;
1903 target_ulong page;
1904
1905 while (len > 0) {
1906 page = addr & TARGET_PAGE_MASK;
1907 l = (page + TARGET_PAGE_SIZE) - addr;
1908 if (l > len)
1909 l = len;
1910 flags = page_get_flags(page);
1911 if (!(flags & PAGE_VALID))
1912 return;
1913 if (is_write) {
1914 if (!(flags & PAGE_WRITE))
1915 return;
1916 memcpy((uint8_t *)addr, buf, len);
1917 } else {
1918 if (!(flags & PAGE_READ))
1919 return;
1920 memcpy(buf, (uint8_t *)addr, len);
1921 }
1922 len -= l;
1923 buf += l;
1924 addr += l;
1925 }
1926}
1927
1928#else
1929void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1930 int len, int is_write)
1931{
1932 int l, io_index;
1933 uint8_t *ptr;
1934 uint32_t val;
1935 target_phys_addr_t page;
1936 unsigned long pd;
1937 PhysPageDesc *p;
1938
1939 while (len > 0) {
1940 page = addr & TARGET_PAGE_MASK;
1941 l = (page + TARGET_PAGE_SIZE) - addr;
1942 if (l > len)
1943 l = len;
1944 p = phys_page_find(page >> TARGET_PAGE_BITS);
1945 if (!p) {
1946 pd = IO_MEM_UNASSIGNED;
1947 } else {
1948 pd = p->phys_offset;
1949 }
1950
1951 if (is_write) {
1952 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
1953 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1954 /* XXX: could force cpu_single_env to NULL to avoid
1955 potential bugs */
1956 if (l >= 4 && ((addr & 3) == 0)) {
1957 /* 32 bit write access */
1958 val = ldl_p(buf);
1959 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
1960 l = 4;
1961 } else if (l >= 2 && ((addr & 1) == 0)) {
1962 /* 16 bit write access */
1963 val = lduw_p(buf);
1964 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
1965 l = 2;
1966 } else {
1967 /* 8 bit write access */
1968 val = ldub_p(buf);
1969 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
1970 l = 1;
1971 }
1972 } else {
1973 unsigned long addr1;
1974 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
1975 /* RAM case */
1976 ptr = phys_ram_base + addr1;
1977 memcpy(ptr, buf, l);
1978 if (!cpu_physical_memory_is_dirty(addr1)) {
1979 /* invalidate code */
1980 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1981 /* set dirty bit */
1982 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
1983 (0xff & ~CODE_DIRTY_FLAG);
1984 }
1985 }
1986 } else {
1987 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1988 /* I/O case */
1989 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1990 if (l >= 4 && ((addr & 3) == 0)) {
1991 /* 32 bit read access */
1992 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1993 stl_p(buf, val);
1994 l = 4;
1995 } else if (l >= 2 && ((addr & 1) == 0)) {
1996 /* 16 bit read access */
1997 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1998 stw_p(buf, val);
1999 l = 2;
2000 } else {
2001 /* 8 bit read access */
2002 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2003 stb_p(buf, val);
2004 l = 1;
2005 }
2006 } else {
2007 /* RAM case */
2008 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2009 (addr & ~TARGET_PAGE_MASK);
2010 memcpy(buf, ptr, l);
2011 }
2012 }
2013 len -= l;
2014 buf += l;
2015 addr += l;
2016 }
2017}
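/* Editor's illustration (not part of the original source): device models
   typically reach this slow path through the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers, e.g. when emulating DMA into guest
   memory; the dispatch between RAM and I/O zones is handled above. The
   my_dma_* names below are made up for this sketch. */
#if 0
static void my_dma_write_to_guest(target_phys_addr_t guest_addr,
                                  const uint8_t *host_buf, int size)
{
    cpu_physical_memory_write(guest_addr, host_buf, size);
}
#endif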
2018
2019/* warning: addr must be aligned */
2020uint32_t ldl_phys(target_phys_addr_t addr)
2021{
2022 int io_index;
2023 uint8_t *ptr;
2024 uint32_t val;
2025 unsigned long pd;
2026 PhysPageDesc *p;
2027
2028 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2029 if (!p) {
2030 pd = IO_MEM_UNASSIGNED;
2031 } else {
2032 pd = p->phys_offset;
2033 }
2034
2035 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2036 /* I/O case */
2037 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2038 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2039 } else {
2040 /* RAM case */
2041 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2042 (addr & ~TARGET_PAGE_MASK);
2043 val = ldl_p(ptr);
2044 }
2045 return val;
2046}
2047
2048/* XXX: optimize */
2049uint32_t ldub_phys(target_phys_addr_t addr)
2050{
2051 uint8_t val;
2052 cpu_physical_memory_read(addr, &val, 1);
2053 return val;
2054}
2055
2056/* XXX: optimize */
2057uint32_t lduw_phys(target_phys_addr_t addr)
2058{
2059 uint16_t val;
2060 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2061 return tswap16(val);
2062}
2063
2064/* XXX: optimize */
2065uint64_t ldq_phys(target_phys_addr_t addr)
2066{
2067 uint64_t val;
2068 cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
2069 return tswap64(val);
2070}
2071
2072/* warning: addr must be aligned. The RAM page is not marked as dirty
2073   and the code inside it is not invalidated. This is useful if the
2074   dirty bits are used to track modified PTEs. */
2075void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2076{
2077 int io_index;
2078 uint8_t *ptr;
2079 unsigned long pd;
2080 PhysPageDesc *p;
2081
2082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2083 if (!p) {
2084 pd = IO_MEM_UNASSIGNED;
2085 } else {
2086 pd = p->phys_offset;
2087 }
2088
2089 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2091 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2092 } else {
2093 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2094 (addr & ~TARGET_PAGE_MASK);
2095 stl_p(ptr, val);
2096 }
2097}
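/* Editor's illustration (not part of the original source): a target MMU
   helper setting the accessed bit of a (hypothetical) 32-bit page table
   entry would use the _notdirty variant so that this bookkeeping write does
   not disturb the dirty bits used to detect guest PTE modifications.
   my_mmu_set_accessed and the 0x20 flag value are assumptions for this
   sketch. */
#if 0
static void my_mmu_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif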
2098
2099/* warning: addr must be aligned */
2100void stl_phys(target_phys_addr_t addr, uint32_t val)
2101{
2102 int io_index;
2103 uint8_t *ptr;
2104 unsigned long pd;
2105 PhysPageDesc *p;
2106
2107 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2108 if (!p) {
2109 pd = IO_MEM_UNASSIGNED;
2110 } else {
2111 pd = p->phys_offset;
2112 }
2113
2114 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2115 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2116 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2117 } else {
2118 unsigned long addr1;
2119 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2120 /* RAM case */
2121 ptr = phys_ram_base + addr1;
2122 stl_p(ptr, val);
2123 if (!cpu_physical_memory_is_dirty(addr1)) {
2124 /* invalidate code */
2125 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2126 /* set dirty bit */
2127 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2128 (0xff & ~CODE_DIRTY_FLAG);
2129 }
2130 }
2131}
2132
2133/* XXX: optimize */
2134void stb_phys(target_phys_addr_t addr, uint32_t val)
2135{
2136 uint8_t v = val;
2137 cpu_physical_memory_write(addr, &v, 1);
2138}
2139
2140/* XXX: optimize */
2141void stw_phys(target_phys_addr_t addr, uint32_t val)
2142{
2143 uint16_t v = tswap16(val);
2144 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2145}
2146
2147/* XXX: optimize */
2148void stq_phys(target_phys_addr_t addr, uint64_t val)
2149{
2150 val = tswap64(val);
2151 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2152}
2153
2154#endif
2155
2156/* virtual memory access for debug */
2157int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2158 uint8_t *buf, int len, int is_write)
2159{
2160 int l;
2161 target_ulong page, phys_addr;
2162
2163 while (len > 0) {
2164 page = addr & TARGET_PAGE_MASK;
2165 phys_addr = cpu_get_phys_page_debug(env, page);
2166 /* if no physical page mapped, return an error */
2167 if (phys_addr == -1)
2168 return -1;
2169 l = (page + TARGET_PAGE_SIZE) - addr;
2170 if (l > len)
2171 l = len;
2172 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2173 buf, l, is_write);
2174 len -= l;
2175 buf += l;
2176 addr += l;
2177 }
2178 return 0;
2179}
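/* Editor's illustration (not part of the original source): a debugger stub
   reading guest virtual memory. my_debug_peek is a made-up name; a return
   value of -1 means no physical page was mapped at the address. */
#if 0
static int my_debug_peek(CPUState *env, target_ulong vaddr,
                         uint8_t *out, int size)
{
    return cpu_memory_rw_debug(env, vaddr, out, size, 0);
}
#endif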
2180
2181void dump_exec_info(FILE *f,
2182 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2183{
2184 int i, target_code_size, max_target_code_size;
2185 int direct_jmp_count, direct_jmp2_count, cross_page;
2186 TranslationBlock *tb;
2187
2188 target_code_size = 0;
2189 max_target_code_size = 0;
2190 cross_page = 0;
2191 direct_jmp_count = 0;
2192 direct_jmp2_count = 0;
2193 for(i = 0; i < nb_tbs; i++) {
2194 tb = &tbs[i];
2195 target_code_size += tb->size;
2196 if (tb->size > max_target_code_size)
2197 max_target_code_size = tb->size;
2198 if (tb->page_addr[1] != -1)
2199 cross_page++;
2200 if (tb->tb_next_offset[0] != 0xffff) {
2201 direct_jmp_count++;
2202 if (tb->tb_next_offset[1] != 0xffff) {
2203 direct_jmp2_count++;
2204 }
2205 }
2206 }
2207    /* XXX: avoid using doubles? */
2208 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2209 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2210 nb_tbs ? target_code_size / nb_tbs : 0,
2211 max_target_code_size);
2212 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2213                nb_tbs ? (int)((code_gen_ptr - code_gen_buffer) / nb_tbs) : 0,
2214 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2215 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2216 cross_page,
2217 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2218 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2219 direct_jmp_count,
2220 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2221 direct_jmp2_count,
2222 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2223 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2224 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2225 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2226}
2227
2228#if !defined(CONFIG_USER_ONLY)
2229
2230#define MMUSUFFIX _cmmu
2231#define GETPC() NULL
2232#define env cpu_single_env
2233#define SOFTMMU_CODE_ACCESS
2234
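/* Note (editor's addition): each inclusion of softmmu_template.h below
   instantiates the code-access helpers for one access size; SHIFT 0, 1, 2
   and 3 select 1-, 2-, 4- and 8-byte accesses respectively. The _cmmu
   suffix and SOFTMMU_CODE_ACCESS mark these as the variants used for
   instruction fetches rather than data accesses. */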
2235#define SHIFT 0
2236#include "softmmu_template.h"
2237
2238#define SHIFT 1
2239#include "softmmu_template.h"
2240
2241#define SHIFT 2
2242#include "softmmu_template.h"
2243
2244#define SHIFT 3
2245#include "softmmu_template.h"
2246
2247#undef env
2248
2249#endif