[qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
40
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44
45 /* make various TB consistency checks */
46 //#define DEBUG_TB_CHECK
47 //#define DEBUG_TLB_CHECK
48
49 /* threshold to flush the translated code buffer */
50 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
51
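/* number of write accesses to a code page before a per-page code
   bitmap is built (see build_page_bitmap()) */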
52 #define SMC_BITMAP_USE_THRESHOLD 10
53
54 #define MMAP_AREA_START 0x00000000
55 #define MMAP_AREA_END 0xa8000000
56
57 #if defined(TARGET_SPARC64)
58 #define TARGET_PHYS_ADDR_SPACE_BITS 41
59 #elif defined(TARGET_PPC64)
60 #define TARGET_PHYS_ADDR_SPACE_BITS 42
61 #else
62 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
63 #define TARGET_PHYS_ADDR_SPACE_BITS 32
64 #endif
65
66 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
67 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
68 int nb_tbs;
69 /* any access to the tbs or the page table must use this lock */
70 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
71
72 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
73 uint8_t *code_gen_ptr;
74
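/* guest RAM: phys_ram_base is the host mapping of the guest physical
   RAM, and phys_ram_dirty holds one byte of dirty flags per target page
   (including CODE_DIRTY_FLAG, see the notdirty write handlers below) */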
75 int phys_ram_size;
76 int phys_ram_fd;
77 uint8_t *phys_ram_base;
78 uint8_t *phys_ram_dirty;
79
80 CPUState *first_cpu;
81 /* current CPU in the current thread. It is only valid inside
82 cpu_exec() */
83 CPUState *cpu_single_env;
84
85 typedef struct PageDesc {
86 /* list of TBs intersecting this ram page */
87 TranslationBlock *first_tb;
88 /* in order to optimize self modifying code handling, we count the writes
89 done to a given code page; above a threshold a bitmap is built */
90 unsigned int code_write_count;
91 uint8_t *code_bitmap;
92 #if defined(CONFIG_USER_ONLY)
93 unsigned long flags;
94 #endif
95 } PageDesc;
96
97 typedef struct PhysPageDesc {
98 /* offset in host memory of the page + io_index in the low 12 bits */
99 uint32_t phys_offset;
100 } PhysPageDesc;
101
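/* the page tables below are two-level: the top L1_BITS of a page index
   select an L1 slot and the low L2_BITS select the entry inside the
   corresponding L2 array.  For example, with 4 KB target pages
   (TARGET_PAGE_BITS == 12) both levels have 1024 entries and together
   cover the whole 32-bit address space at page granularity. */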
102 #define L2_BITS 10
103 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
104
105 #define L1_SIZE (1 << L1_BITS)
106 #define L2_SIZE (1 << L2_BITS)
107
108 static void io_mem_init(void);
109
110 unsigned long qemu_real_host_page_size;
111 unsigned long qemu_host_page_bits;
112 unsigned long qemu_host_page_size;
113 unsigned long qemu_host_page_mask;
114
115 /* XXX: for system emulation, it could just be an array */
116 static PageDesc *l1_map[L1_SIZE];
117 PhysPageDesc **l1_phys_map;
118
119 /* io memory support */
120 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
121 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
122 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
123 static int io_mem_nb;
124
125 /* log support */
126 char *logfilename = "/tmp/qemu.log";
127 FILE *logfile;
128 int loglevel;
129
130 /* statistics */
131 static int tlb_flush_count;
132 static int tb_flush_count;
133 static int tb_phys_invalidate_count;
134
135 static void page_init(void)
136 {
137 /* NOTE: we can always assume that qemu_host_page_size >=
138 TARGET_PAGE_SIZE */
139 #ifdef _WIN32
140 {
141 SYSTEM_INFO system_info;
142 DWORD old_protect;
143
144 GetSystemInfo(&system_info);
145 qemu_real_host_page_size = system_info.dwPageSize;
146
147 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
148 PAGE_EXECUTE_READWRITE, &old_protect);
149 }
150 #else
151 qemu_real_host_page_size = getpagesize();
152 {
153 unsigned long start, end;
154
155 start = (unsigned long)code_gen_buffer;
156 start &= ~(qemu_real_host_page_size - 1);
157
158 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
159 end += qemu_real_host_page_size - 1;
160 end &= ~(qemu_real_host_page_size - 1);
161
162 mprotect((void *)start, end - start,
163 PROT_READ | PROT_WRITE | PROT_EXEC);
164 }
165 #endif
166
167 if (qemu_host_page_size == 0)
168 qemu_host_page_size = qemu_real_host_page_size;
169 if (qemu_host_page_size < TARGET_PAGE_SIZE)
170 qemu_host_page_size = TARGET_PAGE_SIZE;
171 qemu_host_page_bits = 0;
172 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
173 qemu_host_page_bits++;
174 qemu_host_page_mask = ~(qemu_host_page_size - 1);
175 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
176 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
177 }
178
179 static inline PageDesc *page_find_alloc(unsigned int index)
180 {
181 PageDesc **lp, *p;
182
183 lp = &l1_map[index >> L2_BITS];
184 p = *lp;
185 if (!p) {
186 /* allocate if not found */
187 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
188 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
189 *lp = p;
190 }
191 return p + (index & (L2_SIZE - 1));
192 }
193
194 static inline PageDesc *page_find(unsigned int index)
195 {
196 PageDesc *p;
197
198 p = l1_map[index >> L2_BITS];
199 if (!p)
200 return 0;
201 return p + (index & (L2_SIZE - 1));
202 }
203
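/* walk (and optionally populate) the physical page table.  When
   TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra top level indexed by the
   high bits of the page index is compiled in; otherwise l1_phys_map
   itself is used as the first level. */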
204 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
205 {
206 void **lp, **p;
207
208 p = (void **)l1_phys_map;
209 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
210
211 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
212 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
213 #endif
214 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
215 p = *lp;
216 if (!p) {
217 /* allocate if not found */
218 if (!alloc)
219 return NULL;
220 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
221 memset(p, 0, sizeof(void *) * L1_SIZE);
222 *lp = p;
223 }
224 #endif
225 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
226 p = *lp;
227 if (!p) {
228 /* allocate if not found */
229 if (!alloc)
230 return NULL;
231 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
232 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
233 *lp = p;
234 }
235 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
236 }
237
238 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
239 {
240 return phys_page_find_alloc(index, 0);
241 }
242
243 #if !defined(CONFIG_USER_ONLY)
244 static void tlb_protect_code(ram_addr_t ram_addr);
245 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
246 target_ulong vaddr);
247 #endif
248
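/* register a new virtual CPU: the first call also initializes the code
   generation buffer, the page descriptors and the I/O memory handlers.
   The CPU is appended to the global list and given the next cpu_index. */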
249 void cpu_exec_init(CPUState *env)
250 {
251 CPUState **penv;
252 int cpu_index;
253
254 if (!code_gen_ptr) {
255 code_gen_ptr = code_gen_buffer;
256 page_init();
257 io_mem_init();
258 }
259 env->next_cpu = NULL;
260 penv = &first_cpu;
261 cpu_index = 0;
262 while (*penv != NULL) {
263 penv = (CPUState **)&(*penv)->next_cpu;
264 cpu_index++;
265 }
266 env->cpu_index = cpu_index;
267 *penv = env;
268 }
269
270 static inline void invalidate_page_bitmap(PageDesc *p)
271 {
272 if (p->code_bitmap) {
273 qemu_free(p->code_bitmap);
274 p->code_bitmap = NULL;
275 }
276 p->code_write_count = 0;
277 }
278
279 /* set to NULL all the 'first_tb' fields in all PageDescs */
280 static void page_flush_tb(void)
281 {
282 int i, j;
283 PageDesc *p;
284
285 for(i = 0; i < L1_SIZE; i++) {
286 p = l1_map[i];
287 if (p) {
288 for(j = 0; j < L2_SIZE; j++) {
289 p->first_tb = NULL;
290 invalidate_page_bitmap(p);
291 p++;
292 }
293 }
294 }
295 }
296
297 /* flush all the translation blocks */
298 /* XXX: tb_flush is currently not thread safe */
299 void tb_flush(CPUState *env1)
300 {
301 CPUState *env;
302 #if defined(DEBUG_FLUSH)
303 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
304 code_gen_ptr - code_gen_buffer,
305 nb_tbs,
306 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
307 #endif
308 nb_tbs = 0;
309
310 for(env = first_cpu; env != NULL; env = env->next_cpu) {
311 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
312 }
313
314 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
315 page_flush_tb();
316
317 code_gen_ptr = code_gen_buffer;
318 /* XXX: flush processor icache at this point if cache flush is
319 expensive */
320 tb_flush_count++;
321 }
322
323 #ifdef DEBUG_TB_CHECK
324
325 static void tb_invalidate_check(unsigned long address)
326 {
327 TranslationBlock *tb;
328 int i;
329 address &= TARGET_PAGE_MASK;
330 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
331 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
332 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
333 address >= tb->pc + tb->size)) {
334 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
335 address, tb->pc, tb->size);
336 }
337 }
338 }
339 }
340
341 /* verify that all the pages have correct rights for code */
342 static void tb_page_check(void)
343 {
344 TranslationBlock *tb;
345 int i, flags1, flags2;
346
347 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
348 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
349 flags1 = page_get_flags(tb->pc);
350 flags2 = page_get_flags(tb->pc + tb->size - 1);
351 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
352 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
353 tb->pc, tb->size, flags1, flags2);
354 }
355 }
356 }
357 }
358
359 void tb_jmp_check(TranslationBlock *tb)
360 {
361 TranslationBlock *tb1;
362 unsigned int n1;
363
364 /* walk the circular list of jumps to this TB */
365 tb1 = tb->jmp_first;
366 for(;;) {
367 n1 = (long)tb1 & 3;
368 tb1 = (TranslationBlock *)((long)tb1 & ~3);
369 if (n1 == 2)
370 break;
371 tb1 = tb1->jmp_next[n1];
372 }
373 /* check end of list */
374 if (tb1 != tb) {
375 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
376 }
377 }
378
379 #endif
380
381 /* invalidate one TB */
382 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
383 int next_offset)
384 {
385 TranslationBlock *tb1;
386 for(;;) {
387 tb1 = *ptb;
388 if (tb1 == tb) {
389 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
390 break;
391 }
392 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
393 }
394 }
395
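/* The TB list pointers manipulated below carry a tag in their two low
   bits: in a page's first_tb/page_next chain the tag tells which of the
   TB's (at most two) pages the link belongs to, and in the circular
   jmp_first/jmp_next lists a tag value of 2 marks the owning TB itself,
   i.e. the end of the list. */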
396 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
397 {
398 TranslationBlock *tb1;
399 unsigned int n1;
400
401 for(;;) {
402 tb1 = *ptb;
403 n1 = (long)tb1 & 3;
404 tb1 = (TranslationBlock *)((long)tb1 & ~3);
405 if (tb1 == tb) {
406 *ptb = tb1->page_next[n1];
407 break;
408 }
409 ptb = &tb1->page_next[n1];
410 }
411 }
412
413 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
414 {
415 TranslationBlock *tb1, **ptb;
416 unsigned int n1;
417
418 ptb = &tb->jmp_next[n];
419 tb1 = *ptb;
420 if (tb1) {
421 /* find tb(n) in circular list */
422 for(;;) {
423 tb1 = *ptb;
424 n1 = (long)tb1 & 3;
425 tb1 = (TranslationBlock *)((long)tb1 & ~3);
426 if (n1 == n && tb1 == tb)
427 break;
428 if (n1 == 2) {
429 ptb = &tb1->jmp_first;
430 } else {
431 ptb = &tb1->jmp_next[n1];
432 }
433 }
434 /* now we can remove tb(n) from the list */
435 *ptb = tb->jmp_next[n];
436
437 tb->jmp_next[n] = NULL;
438 }
439 }
440
441 /* reset the jump entry 'n' of a TB so that it is not chained to
442 another TB */
443 static inline void tb_reset_jump(TranslationBlock *tb, int n)
444 {
445 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
446 }
447
448 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
449 {
450 CPUState *env;
451 PageDesc *p;
452 unsigned int h, n1;
453 target_ulong phys_pc;
454 TranslationBlock *tb1, *tb2;
455
456 /* remove the TB from the hash list */
457 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
458 h = tb_phys_hash_func(phys_pc);
459 tb_remove(&tb_phys_hash[h], tb,
460 offsetof(TranslationBlock, phys_hash_next));
461
462 /* remove the TB from the page list */
463 if (tb->page_addr[0] != page_addr) {
464 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
469 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
470 tb_page_remove(&p->first_tb, tb);
471 invalidate_page_bitmap(p);
472 }
473
474 tb_invalidated_flag = 1;
475
476 /* remove the TB from the hash list */
477 h = tb_jmp_cache_hash_func(tb->pc);
478 for(env = first_cpu; env != NULL; env = env->next_cpu) {
479 if (env->tb_jmp_cache[h] == tb)
480 env->tb_jmp_cache[h] = NULL;
481 }
482
483 /* remove this TB from the two jump lists */
484 tb_jmp_remove(tb, 0);
485 tb_jmp_remove(tb, 1);
486
487 /* remove any remaining jumps to this TB */
488 tb1 = tb->jmp_first;
489 for(;;) {
490 n1 = (long)tb1 & 3;
491 if (n1 == 2)
492 break;
493 tb1 = (TranslationBlock *)((long)tb1 & ~3);
494 tb2 = tb1->jmp_next[n1];
495 tb_reset_jump(tb1, n1);
496 tb1->jmp_next[n1] = NULL;
497 tb1 = tb2;
498 }
499 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
500
501 tb_phys_invalidate_count++;
502 }
503
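/* set bits [start, start + len) in the byte-array bitmap 'tab' */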
504 static inline void set_bits(uint8_t *tab, int start, int len)
505 {
506 int end, mask, end1;
507
508 end = start + len;
509 tab += start >> 3;
510 mask = 0xff << (start & 7);
511 if ((start & ~7) == (end & ~7)) {
512 if (start < end) {
513 mask &= ~(0xff << (end & 7));
514 *tab |= mask;
515 }
516 } else {
517 *tab++ |= mask;
518 start = (start + 8) & ~7;
519 end1 = end & ~7;
520 while (start < end1) {
521 *tab++ = 0xff;
522 start += 8;
523 }
524 if (start < end) {
525 mask = ~(0xff << (end & 7));
526 *tab |= mask;
527 }
528 }
529 }
530
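/* build a bitmap with one bit per byte of the page, marking the bytes
   covered by translated code; tb_invalidate_phys_page_fast() uses it to
   skip writes that cannot touch any TB */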
531 static void build_page_bitmap(PageDesc *p)
532 {
533 int n, tb_start, tb_end;
534 TranslationBlock *tb;
535
536 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
537 if (!p->code_bitmap)
538 return;
539 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
540
541 tb = p->first_tb;
542 while (tb != NULL) {
543 n = (long)tb & 3;
544 tb = (TranslationBlock *)((long)tb & ~3);
545 /* NOTE: this is subtle as a TB may span two physical pages */
546 if (n == 0) {
547 /* NOTE: tb_end may be after the end of the page, but
548 it is not a problem */
549 tb_start = tb->pc & ~TARGET_PAGE_MASK;
550 tb_end = tb_start + tb->size;
551 if (tb_end > TARGET_PAGE_SIZE)
552 tb_end = TARGET_PAGE_SIZE;
553 } else {
554 tb_start = 0;
555 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
556 }
557 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
558 tb = tb->page_next[n];
559 }
560 }
561
562 #ifdef TARGET_HAS_PRECISE_SMC
563
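/* generate a new TB for the code at 'pc' and link it into the physical
   page tables.  In this file it is only used (with CF_SINGLE_INSN) by
   the precise self-modifying code handling. */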
564 static void tb_gen_code(CPUState *env,
565 target_ulong pc, target_ulong cs_base, int flags,
566 int cflags)
567 {
568 TranslationBlock *tb;
569 uint8_t *tc_ptr;
570 target_ulong phys_pc, phys_page2, virt_page2;
571 int code_gen_size;
572
573 phys_pc = get_phys_addr_code(env, pc);
574 tb = tb_alloc(pc);
575 if (!tb) {
576 /* flush must be done */
577 tb_flush(env);
578 /* cannot fail at this point */
579 tb = tb_alloc(pc);
580 }
581 tc_ptr = code_gen_ptr;
582 tb->tc_ptr = tc_ptr;
583 tb->cs_base = cs_base;
584 tb->flags = flags;
585 tb->cflags = cflags;
586 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
587 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
588
589 /* check next page if needed */
590 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
591 phys_page2 = -1;
592 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
593 phys_page2 = get_phys_addr_code(env, virt_page2);
594 }
595 tb_link_phys(tb, phys_pc, phys_page2);
596 }
597 #endif
598
599 /* invalidate all TBs which intersect with the target physical address
600 range [start, end). NOTE: start and end must refer to the same
601 physical page. 'is_cpu_write_access' should be true if called
602 from a real cpu write access: the virtual CPU will exit the current
603 TB if code is modified inside this TB. */
604 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
605 int is_cpu_write_access)
606 {
607 int n, current_tb_modified, current_tb_not_found, current_flags;
608 CPUState *env = cpu_single_env;
609 PageDesc *p;
610 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
611 target_ulong tb_start, tb_end;
612 target_ulong current_pc, current_cs_base;
613
614 p = page_find(start >> TARGET_PAGE_BITS);
615 if (!p)
616 return;
617 if (!p->code_bitmap &&
618 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
619 is_cpu_write_access) {
620 /* build code bitmap */
621 build_page_bitmap(p);
622 }
623
624 /* we remove all the TBs in the range [start, end) */
625 /* XXX: see if in some cases it could be faster to invalidate all the code */
626 current_tb_not_found = is_cpu_write_access;
627 current_tb_modified = 0;
628 current_tb = NULL; /* avoid warning */
629 current_pc = 0; /* avoid warning */
630 current_cs_base = 0; /* avoid warning */
631 current_flags = 0; /* avoid warning */
632 tb = p->first_tb;
633 while (tb != NULL) {
634 n = (long)tb & 3;
635 tb = (TranslationBlock *)((long)tb & ~3);
636 tb_next = tb->page_next[n];
637 /* NOTE: this is subtle as a TB may span two physical pages */
638 if (n == 0) {
639 /* NOTE: tb_end may be after the end of the page, but
640 it is not a problem */
641 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
642 tb_end = tb_start + tb->size;
643 } else {
644 tb_start = tb->page_addr[1];
645 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
646 }
647 if (!(tb_end <= start || tb_start >= end)) {
648 #ifdef TARGET_HAS_PRECISE_SMC
649 if (current_tb_not_found) {
650 current_tb_not_found = 0;
651 current_tb = NULL;
652 if (env->mem_write_pc) {
653 /* now we have a real cpu fault */
654 current_tb = tb_find_pc(env->mem_write_pc);
655 }
656 }
657 if (current_tb == tb &&
658 !(current_tb->cflags & CF_SINGLE_INSN)) {
659 /* If we are modifying the current TB, we must stop
660 its execution. We could be more precise by checking
661 that the modification is after the current PC, but it
662 would require a specialized function to partially
663 restore the CPU state */
664
665 current_tb_modified = 1;
666 cpu_restore_state(current_tb, env,
667 env->mem_write_pc, NULL);
668 #if defined(TARGET_I386)
669 current_flags = env->hflags;
670 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
671 current_cs_base = (target_ulong)env->segs[R_CS].base;
672 current_pc = current_cs_base + env->eip;
673 #else
674 #error unsupported CPU
675 #endif
676 }
677 #endif /* TARGET_HAS_PRECISE_SMC */
678 /* we need to do that to handle the case where a signal
679 occurs while doing tb_phys_invalidate() */
680 saved_tb = NULL;
681 if (env) {
682 saved_tb = env->current_tb;
683 env->current_tb = NULL;
684 }
685 tb_phys_invalidate(tb, -1);
686 if (env) {
687 env->current_tb = saved_tb;
688 if (env->interrupt_request && env->current_tb)
689 cpu_interrupt(env, env->interrupt_request);
690 }
691 }
692 tb = tb_next;
693 }
694 #if !defined(CONFIG_USER_ONLY)
695 /* if no code remaining, no need to continue to use slow writes */
696 if (!p->first_tb) {
697 invalidate_page_bitmap(p);
698 if (is_cpu_write_access) {
699 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
700 }
701 }
702 #endif
703 #ifdef TARGET_HAS_PRECISE_SMC
704 if (current_tb_modified) {
705 /* we generate a block containing just the instruction
706 modifying the memory. It will ensure that it cannot modify
707 itself */
708 env->current_tb = NULL;
709 tb_gen_code(env, current_pc, current_cs_base, current_flags,
710 CF_SINGLE_INSN);
711 cpu_resume_from_signal(env, NULL);
712 }
713 #endif
714 }
715
716 /* len must be <= 8 and start must be a multiple of len */
717 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
718 {
719 PageDesc *p;
720 int offset, b;
721 #if 0
722 if (1) {
723 if (loglevel) {
724 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
725 cpu_single_env->mem_write_vaddr, len,
726 cpu_single_env->eip,
727 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
728 }
729 }
730 #endif
731 p = page_find(start >> TARGET_PAGE_BITS);
732 if (!p)
733 return;
734 if (p->code_bitmap) {
735 offset = start & ~TARGET_PAGE_MASK;
736 b = p->code_bitmap[offset >> 3] >> (offset & 7);
737 if (b & ((1 << len) - 1))
738 goto do_invalidate;
739 } else {
740 do_invalidate:
741 tb_invalidate_phys_page_range(start, start + len, 1);
742 }
743 }
744
745 #if !defined(CONFIG_SOFTMMU)
746 static void tb_invalidate_phys_page(target_ulong addr,
747 unsigned long pc, void *puc)
748 {
749 int n, current_flags, current_tb_modified;
750 target_ulong current_pc, current_cs_base;
751 PageDesc *p;
752 TranslationBlock *tb, *current_tb;
753 #ifdef TARGET_HAS_PRECISE_SMC
754 CPUState *env = cpu_single_env;
755 #endif
756
757 addr &= TARGET_PAGE_MASK;
758 p = page_find(addr >> TARGET_PAGE_BITS);
759 if (!p)
760 return;
761 tb = p->first_tb;
762 current_tb_modified = 0;
763 current_tb = NULL;
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
767 #ifdef TARGET_HAS_PRECISE_SMC
768 if (tb && pc != 0) {
769 current_tb = tb_find_pc(pc);
770 }
771 #endif
772 while (tb != NULL) {
773 n = (long)tb & 3;
774 tb = (TranslationBlock *)((long)tb & ~3);
775 #ifdef TARGET_HAS_PRECISE_SMC
776 if (current_tb == tb &&
777 !(current_tb->cflags & CF_SINGLE_INSN)) {
778 /* If we are modifying the current TB, we must stop
779 its execution. We could be more precise by checking
780 that the modification is after the current PC, but it
781 would require a specialized function to partially
782 restore the CPU state */
783
784 current_tb_modified = 1;
785 cpu_restore_state(current_tb, env, pc, puc);
786 #if defined(TARGET_I386)
787 current_flags = env->hflags;
788 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
789 current_cs_base = (target_ulong)env->segs[R_CS].base;
790 current_pc = current_cs_base + env->eip;
791 #else
792 #error unsupported CPU
793 #endif
794 }
795 #endif /* TARGET_HAS_PRECISE_SMC */
796 tb_phys_invalidate(tb, addr);
797 tb = tb->page_next[n];
798 }
799 p->first_tb = NULL;
800 #ifdef TARGET_HAS_PRECISE_SMC
801 if (current_tb_modified) {
802 /* we generate a block containing just the instruction
803 modifying the memory. It will ensure that it cannot modify
804 itself */
805 env->current_tb = NULL;
806 tb_gen_code(env, current_pc, current_cs_base, current_flags,
807 CF_SINGLE_INSN);
808 cpu_resume_from_signal(env, puc);
809 }
810 #endif
811 }
812 #endif
813
814 /* add the tb in the target page and protect it if necessary */
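/* Two write protection strategies are used: in user mode emulation the
   host page is made read-only with mprotect(), so guest writes fault and
   go through page_unprotect(); in system emulation the page is instead
   write protected through the TLB (tlb_protect_code()), so writes are
   routed to the notdirty handlers. */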
815 static inline void tb_alloc_page(TranslationBlock *tb,
816 unsigned int n, target_ulong page_addr)
817 {
818 PageDesc *p;
819 TranslationBlock *last_first_tb;
820
821 tb->page_addr[n] = page_addr;
822 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
823 tb->page_next[n] = p->first_tb;
824 last_first_tb = p->first_tb;
825 p->first_tb = (TranslationBlock *)((long)tb | n);
826 invalidate_page_bitmap(p);
827
828 #if defined(TARGET_HAS_SMC) || 1
829
830 #if defined(CONFIG_USER_ONLY)
831 if (p->flags & PAGE_WRITE) {
832 target_ulong addr;
833 PageDesc *p2;
834 int prot;
835
836 /* force the host page to be non writable (writes will take a
837 page fault + mprotect overhead) */
838 page_addr &= qemu_host_page_mask;
839 prot = 0;
840 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
841 addr += TARGET_PAGE_SIZE) {
842
843 p2 = page_find (addr >> TARGET_PAGE_BITS);
844 if (!p2)
845 continue;
846 prot |= p2->flags;
847 p2->flags &= ~PAGE_WRITE;
848 page_get_flags(addr);
849 }
850 mprotect(g2h(page_addr), qemu_host_page_size,
851 (prot & PAGE_BITS) & ~PAGE_WRITE);
852 #ifdef DEBUG_TB_INVALIDATE
853 printf("protecting code page: 0x%08lx\n",
854 page_addr);
855 #endif
856 }
857 #else
858 /* if some code is already present, then the page is already
859 protected, so we only need to handle the case where the first TB
860 is allocated in a physical page */
861 if (!last_first_tb) {
862 tlb_protect_code(page_addr);
863 }
864 #endif
865
866 #endif /* TARGET_HAS_SMC */
867 }
868
869 /* Allocate a new translation block. Flush the translation buffer if
870 too many translation blocks or too much generated code. */
871 TranslationBlock *tb_alloc(target_ulong pc)
872 {
873 TranslationBlock *tb;
874
875 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
876 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
877 return NULL;
878 tb = &tbs[nb_tbs++];
879 tb->pc = pc;
880 tb->cflags = 0;
881 return tb;
882 }
883
884 /* add a new TB and link it to the physical page tables. phys_page2 is
885 (-1) to indicate that only one page contains the TB. */
886 void tb_link_phys(TranslationBlock *tb,
887 target_ulong phys_pc, target_ulong phys_page2)
888 {
889 unsigned int h;
890 TranslationBlock **ptb;
891
892 /* add in the physical hash table */
893 h = tb_phys_hash_func(phys_pc);
894 ptb = &tb_phys_hash[h];
895 tb->phys_hash_next = *ptb;
896 *ptb = tb;
897
898 /* add in the page list */
899 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
900 if (phys_page2 != -1)
901 tb_alloc_page(tb, 1, phys_page2);
902 else
903 tb->page_addr[1] = -1;
904
905 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
906 tb->jmp_next[0] = NULL;
907 tb->jmp_next[1] = NULL;
908 #ifdef USE_CODE_COPY
909 tb->cflags &= ~CF_FP_USED;
910 if (tb->cflags & CF_TB_FP_USED)
911 tb->cflags |= CF_FP_USED;
912 #endif
913
914 /* init original jump addresses */
915 if (tb->tb_next_offset[0] != 0xffff)
916 tb_reset_jump(tb, 0);
917 if (tb->tb_next_offset[1] != 0xffff)
918 tb_reset_jump(tb, 1);
919
920 #ifdef DEBUG_TB_CHECK
921 tb_page_check();
922 #endif
923 }
924
925 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
926 tb[1].tc_ptr. Return NULL if not found */
927 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
928 {
929 int m_min, m_max, m;
930 unsigned long v;
931 TranslationBlock *tb;
932
933 if (nb_tbs <= 0)
934 return NULL;
935 if (tc_ptr < (unsigned long)code_gen_buffer ||
936 tc_ptr >= (unsigned long)code_gen_ptr)
937 return NULL;
938 /* binary search (cf Knuth) */
939 m_min = 0;
940 m_max = nb_tbs - 1;
941 while (m_min <= m_max) {
942 m = (m_min + m_max) >> 1;
943 tb = &tbs[m];
944 v = (unsigned long)tb->tc_ptr;
945 if (v == tc_ptr)
946 return tb;
947 else if (tc_ptr < v) {
948 m_max = m - 1;
949 } else {
950 m_min = m + 1;
951 }
952 }
953 return &tbs[m_max];
954 }
955
956 static void tb_reset_jump_recursive(TranslationBlock *tb);
957
958 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
959 {
960 TranslationBlock *tb1, *tb_next, **ptb;
961 unsigned int n1;
962
963 tb1 = tb->jmp_next[n];
964 if (tb1 != NULL) {
965 /* find head of list */
966 for(;;) {
967 n1 = (long)tb1 & 3;
968 tb1 = (TranslationBlock *)((long)tb1 & ~3);
969 if (n1 == 2)
970 break;
971 tb1 = tb1->jmp_next[n1];
972 }
973 /* we are now sure that tb jumps to tb1 */
974 tb_next = tb1;
975
976 /* remove tb from the jmp_first list */
977 ptb = &tb_next->jmp_first;
978 for(;;) {
979 tb1 = *ptb;
980 n1 = (long)tb1 & 3;
981 tb1 = (TranslationBlock *)((long)tb1 & ~3);
982 if (n1 == n && tb1 == tb)
983 break;
984 ptb = &tb1->jmp_next[n1];
985 }
986 *ptb = tb->jmp_next[n];
987 tb->jmp_next[n] = NULL;
988
989 /* remove the jump to the next tb in the generated code */
990 tb_reset_jump(tb, n);
991
992 /* remove jumps in the tb we could have jumped to */
993 tb_reset_jump_recursive(tb_next);
994 }
995 }
996
997 static void tb_reset_jump_recursive(TranslationBlock *tb)
998 {
999 tb_reset_jump_recursive2(tb, 0);
1000 tb_reset_jump_recursive2(tb, 1);
1001 }
1002
1003 #if defined(TARGET_HAS_ICE)
1004 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1005 {
1006 target_ulong phys_addr;
1007
1008 phys_addr = cpu_get_phys_page_debug(env, pc);
1009 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1010 }
1011 #endif
1012
1013 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1014 breakpoint is reached */
1015 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1016 {
1017 #if defined(TARGET_HAS_ICE)
1018 int i;
1019
1020 for(i = 0; i < env->nb_breakpoints; i++) {
1021 if (env->breakpoints[i] == pc)
1022 return 0;
1023 }
1024
1025 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1026 return -1;
1027 env->breakpoints[env->nb_breakpoints++] = pc;
1028
1029 breakpoint_invalidate(env, pc);
1030 return 0;
1031 #else
1032 return -1;
1033 #endif
1034 }
1035
1036 /* remove a breakpoint */
1037 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1038 {
1039 #if defined(TARGET_HAS_ICE)
1040 int i;
1041 for(i = 0; i < env->nb_breakpoints; i++) {
1042 if (env->breakpoints[i] == pc)
1043 goto found;
1044 }
1045 return -1;
1046 found:
1047 env->nb_breakpoints--;
1048 if (i < env->nb_breakpoints)
1049 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1050
1051 breakpoint_invalidate(env, pc);
1052 return 0;
1053 #else
1054 return -1;
1055 #endif
1056 }
1057
1058 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1059 CPU loop after each instruction */
1060 void cpu_single_step(CPUState *env, int enabled)
1061 {
1062 #if defined(TARGET_HAS_ICE)
1063 if (env->singlestep_enabled != enabled) {
1064 env->singlestep_enabled = enabled;
1065 /* must flush all the translated code to avoid inconsistencies */
1066 /* XXX: only flush what is necessary */
1067 tb_flush(env);
1068 }
1069 #endif
1070 }
1071
1072 /* enable or disable low level logging */
1073 void cpu_set_log(int log_flags)
1074 {
1075 loglevel = log_flags;
1076 if (loglevel && !logfile) {
1077 logfile = fopen(logfilename, "w");
1078 if (!logfile) {
1079 perror(logfilename);
1080 _exit(1);
1081 }
1082 #if !defined(CONFIG_SOFTMMU)
1083 /* must avoid glibc's use of mmap() by setting the buffer "by hand" */
1084 {
1085 static uint8_t logfile_buf[4096];
1086 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1087 }
1088 #else
1089 setvbuf(logfile, NULL, _IOLBF, 0);
1090 #endif
1091 }
1092 }
1093
1094 void cpu_set_log_filename(const char *filename)
1095 {
1096 logfilename = strdup(filename);
1097 }
1098
1099 /* mask must never be zero, except for A20 change call */
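/* besides setting the request flag, this unlinks the chained jumps of
   the TB currently executing on 'env', so the CPU loop returns to the
   main loop at the next TB boundary and sees the interrupt */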
1100 void cpu_interrupt(CPUState *env, int mask)
1101 {
1102 TranslationBlock *tb;
1103 static int interrupt_lock;
1104
1105 env->interrupt_request |= mask;
1106 /* if the cpu is currently executing code, we must unlink it and
1107 all the potentially executing TB */
1108 tb = env->current_tb;
1109 if (tb && !testandset(&interrupt_lock)) {
1110 env->current_tb = NULL;
1111 tb_reset_jump_recursive(tb);
1112 interrupt_lock = 0;
1113 }
1114 }
1115
1116 void cpu_reset_interrupt(CPUState *env, int mask)
1117 {
1118 env->interrupt_request &= ~mask;
1119 }
1120
1121 CPULogItem cpu_log_items[] = {
1122 { CPU_LOG_TB_OUT_ASM, "out_asm",
1123 "show generated host assembly code for each compiled TB" },
1124 { CPU_LOG_TB_IN_ASM, "in_asm",
1125 "show target assembly code for each compiled TB" },
1126 { CPU_LOG_TB_OP, "op",
1127 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1128 #ifdef TARGET_I386
1129 { CPU_LOG_TB_OP_OPT, "op_opt",
1130 "show micro ops after optimization for each compiled TB" },
1131 #endif
1132 { CPU_LOG_INT, "int",
1133 "show interrupts/exceptions in short format" },
1134 { CPU_LOG_EXEC, "exec",
1135 "show trace before each executed TB (lots of logs)" },
1136 { CPU_LOG_TB_CPU, "cpu",
1137 "show CPU state before bloc translation" },
1138 #ifdef TARGET_I386
1139 { CPU_LOG_PCALL, "pcall",
1140 "show protected mode far calls/returns/exceptions" },
1141 #endif
1142 #ifdef DEBUG_IOPORT
1143 { CPU_LOG_IOPORT, "ioport",
1144 "show all i/o ports accesses" },
1145 #endif
1146 { 0, NULL, NULL },
1147 };
1148
1149 static int cmp1(const char *s1, int n, const char *s2)
1150 {
1151 if (strlen(s2) != n)
1152 return 0;
1153 return memcmp(s1, s2, n) == 0;
1154 }
1155
1156 /* take a comma-separated list of log masks and return the resulting mask. Return 0 on error. */
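/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" enables every item
   listed in cpu_log_items[] */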
1157 int cpu_str_to_log_mask(const char *str)
1158 {
1159 CPULogItem *item;
1160 int mask;
1161 const char *p, *p1;
1162
1163 p = str;
1164 mask = 0;
1165 for(;;) {
1166 p1 = strchr(p, ',');
1167 if (!p1)
1168 p1 = p + strlen(p);
1169 if(cmp1(p,p1-p,"all")) {
1170 for(item = cpu_log_items; item->mask != 0; item++) {
1171 mask |= item->mask;
1172 }
1173 } else {
1174 for(item = cpu_log_items; item->mask != 0; item++) {
1175 if (cmp1(p, p1 - p, item->name))
1176 goto found;
1177 }
1178 return 0;
1179 }
1180 found:
1181 mask |= item->mask;
1182 if (*p1 != ',')
1183 break;
1184 p = p1 + 1;
1185 }
1186 return mask;
1187 }
1188
1189 void cpu_abort(CPUState *env, const char *fmt, ...)
1190 {
1191 va_list ap;
1192
1193 va_start(ap, fmt);
1194 fprintf(stderr, "qemu: fatal: ");
1195 vfprintf(stderr, fmt, ap);
1196 fprintf(stderr, "\n");
1197 #ifdef TARGET_I386
1198 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1199 #else
1200 cpu_dump_state(env, stderr, fprintf, 0);
1201 #endif
1202 va_end(ap);
1203 abort();
1204 }
1205
1206 #if !defined(CONFIG_USER_ONLY)
1207
1208 /* NOTE: if flush_global is true, also flush global entries (not
1209 implemented yet) */
1210 void tlb_flush(CPUState *env, int flush_global)
1211 {
1212 int i;
1213
1214 #if defined(DEBUG_TLB)
1215 printf("tlb_flush:\n");
1216 #endif
1217 /* must reset current TB so that interrupts cannot modify the
1218 links while we are modifying them */
1219 env->current_tb = NULL;
1220
1221 for(i = 0; i < CPU_TLB_SIZE; i++) {
1222 env->tlb_table[0][i].addr_read = -1;
1223 env->tlb_table[0][i].addr_write = -1;
1224 env->tlb_table[0][i].addr_code = -1;
1225 env->tlb_table[1][i].addr_read = -1;
1226 env->tlb_table[1][i].addr_write = -1;
1227 env->tlb_table[1][i].addr_code = -1;
1228 }
1229
1230 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1231
1232 #if !defined(CONFIG_SOFTMMU)
1233 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1234 #endif
1235 #ifdef USE_KQEMU
1236 if (env->kqemu_enabled) {
1237 kqemu_flush(env, flush_global);
1238 }
1239 #endif
1240 tlb_flush_count++;
1241 }
1242
1243 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1244 {
1245 if (addr == (tlb_entry->addr_read &
1246 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1247 addr == (tlb_entry->addr_write &
1248 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1249 addr == (tlb_entry->addr_code &
1250 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1251 tlb_entry->addr_read = -1;
1252 tlb_entry->addr_write = -1;
1253 tlb_entry->addr_code = -1;
1254 }
1255 }
1256
1257 void tlb_flush_page(CPUState *env, target_ulong addr)
1258 {
1259 int i;
1260 TranslationBlock *tb;
1261
1262 #if defined(DEBUG_TLB)
1263 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1264 #endif
1265 /* must reset current TB so that interrupts cannot modify the
1266 links while we are modifying them */
1267 env->current_tb = NULL;
1268
1269 addr &= TARGET_PAGE_MASK;
1270 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1271 tlb_flush_entry(&env->tlb_table[0][i], addr);
1272 tlb_flush_entry(&env->tlb_table[1][i], addr);
1273
1274 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1275 tb = env->tb_jmp_cache[i];
1276 if (tb &&
1277 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1278 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1279 env->tb_jmp_cache[i] = NULL;
1280 }
1281 }
1282
1283 #if !defined(CONFIG_SOFTMMU)
1284 if (addr < MMAP_AREA_END)
1285 munmap((void *)addr, TARGET_PAGE_SIZE);
1286 #endif
1287 #ifdef USE_KQEMU
1288 if (env->kqemu_enabled) {
1289 kqemu_flush_page(env, addr);
1290 }
1291 #endif
1292 }
1293
1294 /* update the TLBs so that writes to code in the virtual page 'addr'
1295 can be detected */
1296 static void tlb_protect_code(ram_addr_t ram_addr)
1297 {
1298 cpu_physical_memory_reset_dirty(ram_addr,
1299 ram_addr + TARGET_PAGE_SIZE,
1300 CODE_DIRTY_FLAG);
1301 }
1302
1303 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1304 tested for self modifying code */
1305 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1306 target_ulong vaddr)
1307 {
1308 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1309 }
1310
1311 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1312 unsigned long start, unsigned long length)
1313 {
1314 unsigned long addr;
1315 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1316 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1317 if ((addr - start) < length) {
1318 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1319 }
1320 }
1321 }
1322
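/* clear the given dirty flag bits for the physical address range
   [start, end) and update every CPU TLB so that the next write to the
   range goes through the notdirty handlers (which set the bits again) */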
1323 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1324 int dirty_flags)
1325 {
1326 CPUState *env;
1327 unsigned long length, start1;
1328 int i, mask, len;
1329 uint8_t *p;
1330
1331 start &= TARGET_PAGE_MASK;
1332 end = TARGET_PAGE_ALIGN(end);
1333
1334 length = end - start;
1335 if (length == 0)
1336 return;
1337 len = length >> TARGET_PAGE_BITS;
1338 #ifdef USE_KQEMU
1339 /* XXX: should not depend on cpu context */
1340 env = first_cpu;
1341 if (env->kqemu_enabled) {
1342 ram_addr_t addr;
1343 addr = start;
1344 for(i = 0; i < len; i++) {
1345 kqemu_set_notdirty(env, addr);
1346 addr += TARGET_PAGE_SIZE;
1347 }
1348 }
1349 #endif
1350 mask = ~dirty_flags;
1351 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1352 for(i = 0; i < len; i++)
1353 p[i] &= mask;
1354
1355 /* we modify the TLB cache so that the dirty bit will be set again
1356 when accessing the range */
1357 start1 = start + (unsigned long)phys_ram_base;
1358 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1359 for(i = 0; i < CPU_TLB_SIZE; i++)
1360 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1361 for(i = 0; i < CPU_TLB_SIZE; i++)
1362 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1363 }
1364
1365 #if !defined(CONFIG_SOFTMMU)
1366 /* XXX: this is expensive */
1367 {
1368 VirtPageDesc *p;
1369 int j;
1370 target_ulong addr;
1371
1372 for(i = 0; i < L1_SIZE; i++) {
1373 p = l1_virt_map[i];
1374 if (p) {
1375 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1376 for(j = 0; j < L2_SIZE; j++) {
1377 if (p->valid_tag == virt_valid_tag &&
1378 p->phys_addr >= start && p->phys_addr < end &&
1379 (p->prot & PROT_WRITE)) {
1380 if (addr < MMAP_AREA_END) {
1381 mprotect((void *)addr, TARGET_PAGE_SIZE,
1382 p->prot & ~PROT_WRITE);
1383 }
1384 }
1385 addr += TARGET_PAGE_SIZE;
1386 p++;
1387 }
1388 }
1389 }
1390 }
1391 #endif
1392 }
1393
1394 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1395 {
1396 ram_addr_t ram_addr;
1397
1398 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1399 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1400 tlb_entry->addend - (unsigned long)phys_ram_base;
1401 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1402 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1403 }
1404 }
1405 }
1406
1407 /* update the TLB according to the current state of the dirty bits */
1408 void cpu_tlb_update_dirty(CPUState *env)
1409 {
1410 int i;
1411 for(i = 0; i < CPU_TLB_SIZE; i++)
1412 tlb_update_dirty(&env->tlb_table[0][i]);
1413 for(i = 0; i < CPU_TLB_SIZE; i++)
1414 tlb_update_dirty(&env->tlb_table[1][i]);
1415 }
1416
1417 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1418 unsigned long start)
1419 {
1420 unsigned long addr;
1421 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1422 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1423 if (addr == start) {
1424 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1425 }
1426 }
1427 }
1428
1429 /* update the TLB corresponding to virtual page vaddr and phys addr
1430 addr so that it is no longer dirty */
1431 static inline void tlb_set_dirty(CPUState *env,
1432 unsigned long addr, target_ulong vaddr)
1433 {
1434 int i;
1435
1436 addr &= TARGET_PAGE_MASK;
1437 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1438 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1439 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1440 }
1441
1442 /* add a new TLB entry. At most one entry for a given virtual address
1443 is permitted. Return 0 if OK or 2 if the page could not be mapped
1444 (can only happen in non SOFTMMU mode for I/O pages or pages
1445 conflicting with the host address space). */
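/* In each CPUTLBEntry, addr_read/addr_write/addr_code hold the virtual
   page address, possibly combined with an I/O index or TLB state flag in
   the low bits, and 'addend' is the value to add to a virtual address to
   get the corresponding host address for a RAM page. */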
1446 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1447 target_phys_addr_t paddr, int prot,
1448 int is_user, int is_softmmu)
1449 {
1450 PhysPageDesc *p;
1451 unsigned long pd;
1452 unsigned int index;
1453 target_ulong address;
1454 target_phys_addr_t addend;
1455 int ret;
1456 CPUTLBEntry *te;
1457
1458 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1459 if (!p) {
1460 pd = IO_MEM_UNASSIGNED;
1461 } else {
1462 pd = p->phys_offset;
1463 }
1464 #if defined(DEBUG_TLB)
1465 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1466 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1467 #endif
1468
1469 ret = 0;
1470 #if !defined(CONFIG_SOFTMMU)
1471 if (is_softmmu)
1472 #endif
1473 {
1474 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1475 /* IO memory case */
1476 address = vaddr | pd;
1477 addend = paddr;
1478 } else {
1479 /* standard memory */
1480 address = vaddr;
1481 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1482 }
1483
1484 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1485 addend -= vaddr;
1486 te = &env->tlb_table[is_user][index];
1487 te->addend = addend;
1488 if (prot & PAGE_READ) {
1489 te->addr_read = address;
1490 } else {
1491 te->addr_read = -1;
1492 }
1493 if (prot & PAGE_EXEC) {
1494 te->addr_code = address;
1495 } else {
1496 te->addr_code = -1;
1497 }
1498 if (prot & PAGE_WRITE) {
1499 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1500 /* ROM: access is ignored (same as unassigned) */
1501 te->addr_write = vaddr | IO_MEM_ROM;
1502 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1503 !cpu_physical_memory_is_dirty(pd)) {
1504 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1505 } else {
1506 te->addr_write = address;
1507 }
1508 } else {
1509 te->addr_write = -1;
1510 }
1511 }
1512 #if !defined(CONFIG_SOFTMMU)
1513 else {
1514 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1515 /* IO access: no mapping is done as it will be handled by the
1516 soft MMU */
1517 if (!(env->hflags & HF_SOFTMMU_MASK))
1518 ret = 2;
1519 } else {
1520 void *map_addr;
1521
1522 if (vaddr >= MMAP_AREA_END) {
1523 ret = 2;
1524 } else {
1525 if (prot & PROT_WRITE) {
1526 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1527 #if defined(TARGET_HAS_SMC) || 1
1528 first_tb ||
1529 #endif
1530 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1531 !cpu_physical_memory_is_dirty(pd))) {
1532 /* ROM: we behave as if code were present */
1533 /* if code is present, we only map as read only and save the
1534 original mapping */
1535 VirtPageDesc *vp;
1536
1537 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1538 vp->phys_addr = pd;
1539 vp->prot = prot;
1540 vp->valid_tag = virt_valid_tag;
1541 prot &= ~PAGE_WRITE;
1542 }
1543 }
1544 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1545 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1546 if (map_addr == MAP_FAILED) {
1547 cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1548 paddr, vaddr);
1549 }
1550 }
1551 }
1552 }
1553 #endif
1554 return ret;
1555 }
1556
1557 /* called from signal handler: invalidate the code and unprotect the
1558 page. Return TRUE if the fault was successfully handled. */
1559 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1560 {
1561 #if !defined(CONFIG_SOFTMMU)
1562 VirtPageDesc *vp;
1563
1564 #if defined(DEBUG_TLB)
1565 printf("page_unprotect: addr=0x%08x\n", addr);
1566 #endif
1567 addr &= TARGET_PAGE_MASK;
1568
1569 /* if it is not mapped, no need to worry here */
1570 if (addr >= MMAP_AREA_END)
1571 return 0;
1572 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1573 if (!vp)
1574 return 0;
1575 /* NOTE: in this case, validate_tag is _not_ tested as it
1576 validates only the code TLB */
1577 if (vp->valid_tag != virt_valid_tag)
1578 return 0;
1579 if (!(vp->prot & PAGE_WRITE))
1580 return 0;
1581 #if defined(DEBUG_TLB)
1582 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1583 addr, vp->phys_addr, vp->prot);
1584 #endif
1585 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1586 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1587 (unsigned long)addr, vp->prot);
1588 /* set the dirty bit */
1589 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1590 /* flush the code inside */
1591 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1592 return 1;
1593 #else
1594 return 0;
1595 #endif
1596 }
1597
1598 #else
1599
1600 void tlb_flush(CPUState *env, int flush_global)
1601 {
1602 }
1603
1604 void tlb_flush_page(CPUState *env, target_ulong addr)
1605 {
1606 }
1607
1608 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1609 target_phys_addr_t paddr, int prot,
1610 int is_user, int is_softmmu)
1611 {
1612 return 0;
1613 }
1614
1615 /* dump memory mappings */
1616 void page_dump(FILE *f)
1617 {
1618 unsigned long start, end;
1619 int i, j, prot, prot1;
1620 PageDesc *p;
1621
1622 fprintf(f, "%-8s %-8s %-8s %s\n",
1623 "start", "end", "size", "prot");
1624 start = -1;
1625 end = -1;
1626 prot = 0;
1627 for(i = 0; i <= L1_SIZE; i++) {
1628 if (i < L1_SIZE)
1629 p = l1_map[i];
1630 else
1631 p = NULL;
1632 for(j = 0;j < L2_SIZE; j++) {
1633 if (!p)
1634 prot1 = 0;
1635 else
1636 prot1 = p[j].flags;
1637 if (prot1 != prot) {
1638 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1639 if (start != -1) {
1640 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1641 start, end, end - start,
1642 prot & PAGE_READ ? 'r' : '-',
1643 prot & PAGE_WRITE ? 'w' : '-',
1644 prot & PAGE_EXEC ? 'x' : '-');
1645 }
1646 if (prot1 != 0)
1647 start = end;
1648 else
1649 start = -1;
1650 prot = prot1;
1651 }
1652 if (!p)
1653 break;
1654 }
1655 }
1656 }
1657
1658 int page_get_flags(target_ulong address)
1659 {
1660 PageDesc *p;
1661
1662 p = page_find(address >> TARGET_PAGE_BITS);
1663 if (!p)
1664 return 0;
1665 return p->flags;
1666 }
1667
1668 /* modify the flags of a page and invalidate the code if
1669 necessary. The flag PAGE_WRITE_ORG is set automatically
1670 depending on PAGE_WRITE */
1671 void page_set_flags(target_ulong start, target_ulong end, int flags)
1672 {
1673 PageDesc *p;
1674 target_ulong addr;
1675
1676 start = start & TARGET_PAGE_MASK;
1677 end = TARGET_PAGE_ALIGN(end);
1678 if (flags & PAGE_WRITE)
1679 flags |= PAGE_WRITE_ORG;
1680 spin_lock(&tb_lock);
1681 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1682 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1683 /* if the write protection is set, then we invalidate the code
1684 inside */
1685 if (!(p->flags & PAGE_WRITE) &&
1686 (flags & PAGE_WRITE) &&
1687 p->first_tb) {
1688 tb_invalidate_phys_page(addr, 0, NULL);
1689 }
1690 p->flags = flags;
1691 }
1692 spin_unlock(&tb_lock);
1693 }
1694
1695 /* called from signal handler: invalidate the code and unprotect the
1696 page. Return TRUE if the fault was successfully handled. */
1697 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1698 {
1699 unsigned int page_index, prot, pindex;
1700 PageDesc *p, *p1;
1701 target_ulong host_start, host_end, addr;
1702
1703 host_start = address & qemu_host_page_mask;
1704 page_index = host_start >> TARGET_PAGE_BITS;
1705 p1 = page_find(page_index);
1706 if (!p1)
1707 return 0;
1708 host_end = host_start + qemu_host_page_size;
1709 p = p1;
1710 prot = 0;
1711 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1712 prot |= p->flags;
1713 p++;
1714 }
1715 /* if the page was really writable, then we change its
1716 protection back to writable */
1717 if (prot & PAGE_WRITE_ORG) {
1718 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1719 if (!(p1[pindex].flags & PAGE_WRITE)) {
1720 mprotect((void *)g2h(host_start), qemu_host_page_size,
1721 (prot & PAGE_BITS) | PAGE_WRITE);
1722 p1[pindex].flags |= PAGE_WRITE;
1723 /* and since the content will be modified, we must invalidate
1724 the corresponding translated code. */
1725 tb_invalidate_phys_page(address, pc, puc);
1726 #ifdef DEBUG_TB_CHECK
1727 tb_invalidate_check(address);
1728 #endif
1729 return 1;
1730 }
1731 }
1732 return 0;
1733 }
1734
1735 /* call this function when system calls directly modify a memory area */
1736 /* ??? This should be redundant now that we have lock_user. */
1737 void page_unprotect_range(target_ulong data, target_ulong data_size)
1738 {
1739 target_ulong start, end, addr;
1740
1741 start = data;
1742 end = start + data_size;
1743 start &= TARGET_PAGE_MASK;
1744 end = TARGET_PAGE_ALIGN(end);
1745 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1746 page_unprotect(addr, 0, NULL);
1747 }
1748 }
1749
1750 static inline void tlb_set_dirty(CPUState *env,
1751 unsigned long addr, target_ulong vaddr)
1752 {
1753 }
1754 #endif /* defined(CONFIG_USER_ONLY) */
1755
1756 /* register physical memory. 'size' must be a multiple of the target
1757 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1758 io memory page */
1759 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1760 unsigned long size,
1761 unsigned long phys_offset)
1762 {
1763 target_phys_addr_t addr, end_addr;
1764 PhysPageDesc *p;
1765
1766 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1767 end_addr = start_addr + size;
1768 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1769 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1770 p->phys_offset = phys_offset;
1771 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1772 phys_offset += TARGET_PAGE_SIZE;
1773 }
1774 }
1775
1776 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1777 {
1778 return 0;
1779 }
1780
1781 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1782 {
1783 }
1784
1785 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1786 unassigned_mem_readb,
1787 unassigned_mem_readb,
1788 unassigned_mem_readb,
1789 };
1790
1791 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1792 unassigned_mem_writeb,
1793 unassigned_mem_writeb,
1794 unassigned_mem_writeb,
1795 };
1796
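/* Write handlers for RAM pages whose CODE_DIRTY_FLAG is clear (they may
   contain translated code): the write first invalidates any TBs on the
   page, then performs the store, updates the dirty flags and, once the
   page is fully dirty again, restores direct RAM writes in the TLB. */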
1797 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1798 {
1799 unsigned long ram_addr;
1800 int dirty_flags;
1801 ram_addr = addr - (unsigned long)phys_ram_base;
1802 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1803 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1804 #if !defined(CONFIG_USER_ONLY)
1805 tb_invalidate_phys_page_fast(ram_addr, 1);
1806 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1807 #endif
1808 }
1809 stb_p((uint8_t *)(long)addr, val);
1810 #ifdef USE_KQEMU
1811 if (cpu_single_env->kqemu_enabled &&
1812 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1813 kqemu_modify_page(cpu_single_env, ram_addr);
1814 #endif
1815 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1816 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1817 /* we remove the notdirty callback only if the code has been
1818 flushed */
1819 if (dirty_flags == 0xff)
1820 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1821 }
1822
1823 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1824 {
1825 unsigned long ram_addr;
1826 int dirty_flags;
1827 ram_addr = addr - (unsigned long)phys_ram_base;
1828 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1829 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1830 #if !defined(CONFIG_USER_ONLY)
1831 tb_invalidate_phys_page_fast(ram_addr, 2);
1832 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1833 #endif
1834 }
1835 stw_p((uint8_t *)(long)addr, val);
1836 #ifdef USE_KQEMU
1837 if (cpu_single_env->kqemu_enabled &&
1838 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1839 kqemu_modify_page(cpu_single_env, ram_addr);
1840 #endif
1841 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1842 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1843 /* we remove the notdirty callback only if the code has been
1844 flushed */
1845 if (dirty_flags == 0xff)
1846 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1847 }
1848
1849 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1850 {
1851 unsigned long ram_addr;
1852 int dirty_flags;
1853 ram_addr = addr - (unsigned long)phys_ram_base;
1854 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1855 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1856 #if !defined(CONFIG_USER_ONLY)
1857 tb_invalidate_phys_page_fast(ram_addr, 4);
1858 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1859 #endif
1860 }
1861 stl_p((uint8_t *)(long)addr, val);
1862 #ifdef USE_KQEMU
1863 if (cpu_single_env->kqemu_enabled &&
1864 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1865 kqemu_modify_page(cpu_single_env, ram_addr);
1866 #endif
1867 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1868 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1869 /* we remove the notdirty callback only if the code has been
1870 flushed */
1871 if (dirty_flags == 0xff)
1872 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1873 }
1874
1875 static CPUReadMemoryFunc *error_mem_read[3] = {
1876 NULL, /* never used */
1877 NULL, /* never used */
1878 NULL, /* never used */
1879 };
1880
1881 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1882 notdirty_mem_writeb,
1883 notdirty_mem_writew,
1884 notdirty_mem_writel,
1885 };
1886
1887 static void io_mem_init(void)
1888 {
1889 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1890 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1891 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1892 io_mem_nb = 5;
1893
1894 /* alloc dirty bits array */
1895 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1896 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1897 }
1898
1899 /* mem_read and mem_write are arrays giving the access functions for
1900 byte (index 0), word (index 1) and dword (index 2) accesses. All
1901 functions must be supplied. If io_index is positive, the
1902 corresponding I/O zone is modified; otherwise a new I/O zone is
1903 allocated. The return value can be used with
1904 cpu_register_physical_memory(). (-1) is returned on error. */
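/* Illustrative use (device names are hypothetical):
       io = cpu_register_io_memory(0, my_dev_read, my_dev_write, my_dev_state);
       cpu_register_physical_memory(base_addr, 0x1000, io);
   where my_dev_read/my_dev_write each point to three handlers for byte,
   word and dword accesses. */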
1905 int cpu_register_io_memory(int io_index,
1906 CPUReadMemoryFunc **mem_read,
1907 CPUWriteMemoryFunc **mem_write,
1908 void *opaque)
1909 {
1910 int i;
1911
1912 if (io_index <= 0) {
1913 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1914 return -1;
1915 io_index = io_mem_nb++;
1916 } else {
1917 if (io_index >= IO_MEM_NB_ENTRIES)
1918 return -1;
1919 }
1920
1921 for(i = 0;i < 3; i++) {
1922 io_mem_read[io_index][i] = mem_read[i];
1923 io_mem_write[io_index][i] = mem_write[i];
1924 }
1925 io_mem_opaque[io_index] = opaque;
1926 return io_index << IO_MEM_SHIFT;
1927 }
1928
1929 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1930 {
1931 return io_mem_write[io_index >> IO_MEM_SHIFT];
1932 }
1933
1934 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1935 {
1936 return io_mem_read[io_index >> IO_MEM_SHIFT];
1937 }
1938
1939 /* physical memory access (slow version, mainly for debug) */
1940 #if defined(CONFIG_USER_ONLY)
1941 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1942 int len, int is_write)
1943 {
1944 int l, flags;
1945 target_ulong page;
1946 void * p;
1947
1948 while (len > 0) {
1949 page = addr & TARGET_PAGE_MASK;
1950 l = (page + TARGET_PAGE_SIZE) - addr;
1951 if (l > len)
1952 l = len;
1953 flags = page_get_flags(page);
1954 if (!(flags & PAGE_VALID))
1955 return;
1956 if (is_write) {
1957 if (!(flags & PAGE_WRITE))
1958 return;
1959 p = lock_user(addr, len, 0);
1960 memcpy(p, buf, len);
1961 unlock_user(p, addr, len);
1962 } else {
1963 if (!(flags & PAGE_READ))
1964 return;
1965 p = lock_user(addr, l, 1);
1966 memcpy(buf, p, l);
1967 unlock_user(p, addr, 0);
1968 }
1969 len -= l;
1970 buf += l;
1971 addr += l;
1972 }
1973 }
1974
1975 #else
1976 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1977 int len, int is_write)
1978 {
1979 int l, io_index;
1980 uint8_t *ptr;
1981 uint32_t val;
1982 target_phys_addr_t page;
1983 unsigned long pd;
1984 PhysPageDesc *p;
1985
1986 while (len > 0) {
1987 page = addr & TARGET_PAGE_MASK;
1988 l = (page + TARGET_PAGE_SIZE) - addr;
1989 if (l > len)
1990 l = len;
1991 p = phys_page_find(page >> TARGET_PAGE_BITS);
1992 if (!p) {
1993 pd = IO_MEM_UNASSIGNED;
1994 } else {
1995 pd = p->phys_offset;
1996 }
1997
1998 if (is_write) {
1999 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2000 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2001 /* XXX: could force cpu_single_env to NULL to avoid
2002 potential bugs */
2003 if (l >= 4 && ((addr & 3) == 0)) {
2004 /* 32 bit write access */
2005 val = ldl_p(buf);
2006 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2007 l = 4;
2008 } else if (l >= 2 && ((addr & 1) == 0)) {
2009 /* 16 bit write access */
2010 val = lduw_p(buf);
2011 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2012 l = 2;
2013 } else {
2014 /* 8 bit write access */
2015 val = ldub_p(buf);
2016 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2017 l = 1;
2018 }
2019 } else {
2020 unsigned long addr1;
2021 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2022 /* RAM case */
2023 ptr = phys_ram_base + addr1;
2024 memcpy(ptr, buf, l);
2025 if (!cpu_physical_memory_is_dirty(addr1)) {
2026 /* invalidate code */
2027 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2028 /* set dirty bit */
2029 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2030 (0xff & ~CODE_DIRTY_FLAG);
2031 }
2032 }
2033 } else {
2034 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2035 /* I/O case */
2036 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2037 if (l >= 4 && ((addr & 3) == 0)) {
2038 /* 32 bit read access */
2039 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2040 stl_p(buf, val);
2041 l = 4;
2042 } else if (l >= 2 && ((addr & 1) == 0)) {
2043 /* 16 bit read access */
2044 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2045 stw_p(buf, val);
2046 l = 2;
2047 } else {
2048 /* 8 bit read access */
2049 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2050 stb_p(buf, val);
2051 l = 1;
2052 }
2053 } else {
2054 /* RAM case */
2055 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2056 (addr & ~TARGET_PAGE_MASK);
2057 memcpy(buf, ptr, l);
2058 }
2059 }
2060 len -= l;
2061 buf += l;
2062 addr += l;
2063 }
2064 }
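/* Illustrative sketch, not part of the original file: callers typically go
   through the cpu_physical_memory_read()/write() wrappers around the
   function above, e.g. to copy a firmware image into guest RAM.  The helper
   name and arguments are hypothetical. */
#if 0
static void example_load_firmware(target_phys_addr_t dest,
                                  const uint8_t *image, int size)
{
    /* is_write = 1 behind the wrapper: RAM pages are marked dirty and any
       translated code overlapping them is invalidated */
    cpu_physical_memory_write(dest, image, size);
}
#endif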
2065
2066 /* warning: addr must be aligned */
2067 uint32_t ldl_phys(target_phys_addr_t addr)
2068 {
2069 int io_index;
2070 uint8_t *ptr;
2071 uint32_t val;
2072 unsigned long pd;
2073 PhysPageDesc *p;
2074
2075 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2076 if (!p) {
2077 pd = IO_MEM_UNASSIGNED;
2078 } else {
2079 pd = p->phys_offset;
2080 }
2081
2082 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2083 /* I/O case */
2084 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2085 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2086 } else {
2087 /* RAM case */
2088 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2089 (addr & ~TARGET_PAGE_MASK);
2090 val = ldl_p(ptr);
2091 }
2092 return val;
2093 }
2094
2095 /* warning: addr must be aligned */
2096 uint64_t ldq_phys(target_phys_addr_t addr)
2097 {
2098 int io_index;
2099 uint8_t *ptr;
2100 uint64_t val;
2101 unsigned long pd;
2102 PhysPageDesc *p;
2103
2104 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2105 if (!p) {
2106 pd = IO_MEM_UNASSIGNED;
2107 } else {
2108 pd = p->phys_offset;
2109 }
2110
2111 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2112 /* I/O case */
2113 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2114 #ifdef TARGET_WORDS_BIGENDIAN
2115 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2116 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2117 #else
2118 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2119 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2120 #endif
2121 } else {
2122 /* RAM case */
2123 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2124 (addr & ~TARGET_PAGE_MASK);
2125 val = ldq_p(ptr);
2126 }
2127 return val;
2128 }
2129
2130 /* XXX: optimize */
2131 uint32_t ldub_phys(target_phys_addr_t addr)
2132 {
2133 uint8_t val;
2134 cpu_physical_memory_read(addr, &val, 1);
2135 return val;
2136 }
2137
2138 /* XXX: optimize */
2139 uint32_t lduw_phys(target_phys_addr_t addr)
2140 {
2141 uint16_t val;
2142 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2143 return tswap16(val);
2144 }
2145
2146 /* warning: addr must be aligned. The ram page is not marked as dirty
2147 and the code inside is not invalidated. It is useful if the dirty
2148 bits are used to track modified PTEs */
2149 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2150 {
2151 int io_index;
2152 uint8_t *ptr;
2153 unsigned long pd;
2154 PhysPageDesc *p;
2155
2156 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2157 if (!p) {
2158 pd = IO_MEM_UNASSIGNED;
2159 } else {
2160 pd = p->phys_offset;
2161 }
2162
2163 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2164 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2165 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2166 } else {
2167 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2168 (addr & ~TARGET_PAGE_MASK);
2169 stl_p(ptr, val);
2170 }
2171 }
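/* Illustrative sketch, not part of the original file: a target MMU helper
   could use stl_phys_notdirty() to set accessed/dirty bits in a guest page
   table entry without disturbing the dirty bitmap used to track guest
   writes.  The helper name and bit mask are hypothetical. */
#if 0
static void example_set_pte_bits(target_phys_addr_t pte_addr,
                                 uint32_t set_bits)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | set_bits);
}
#endif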
2172
2173 /* warning: addr must be aligned */
2174 void stl_phys(target_phys_addr_t addr, uint32_t val)
2175 {
2176 int io_index;
2177 uint8_t *ptr;
2178 unsigned long pd;
2179 PhysPageDesc *p;
2180
2181 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2182 if (!p) {
2183 pd = IO_MEM_UNASSIGNED;
2184 } else {
2185 pd = p->phys_offset;
2186 }
2187
2188 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2189 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2190 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2191 } else {
2192 unsigned long addr1;
2193 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2194 /* RAM case */
2195 ptr = phys_ram_base + addr1;
2196 stl_p(ptr, val);
2197 if (!cpu_physical_memory_is_dirty(addr1)) {
2198 /* invalidate code */
2199 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2200 /* set dirty bit */
2201 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2202 (0xff & ~CODE_DIRTY_FLAG);
2203 }
2204 }
2205 }
2206
2207 /* XXX: optimize */
2208 void stb_phys(target_phys_addr_t addr, uint32_t val)
2209 {
2210 uint8_t v = val;
2211 cpu_physical_memory_write(addr, &v, 1);
2212 }
2213
2214 /* XXX: optimize */
2215 void stw_phys(target_phys_addr_t addr, uint32_t val)
2216 {
2217 uint16_t v = tswap16(val);
2218 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2219 }
2220
2221 /* XXX: optimize */
2222 void stq_phys(target_phys_addr_t addr, uint64_t val)
2223 {
2224 val = tswap64(val);
2225 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2226 }
2227
2228 #endif
2229
2230 /* virtual memory access for debug */
2231 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2232 uint8_t *buf, int len, int is_write)
2233 {
2234 int l;
2235 target_ulong page, phys_addr;
2236
2237 while (len > 0) {
2238 page = addr & TARGET_PAGE_MASK;
2239 phys_addr = cpu_get_phys_page_debug(env, page);
2240 /* if no physical page mapped, return an error */
2241 if (phys_addr == -1)
2242 return -1;
2243 l = (page + TARGET_PAGE_SIZE) - addr;
2244 if (l > len)
2245 l = len;
2246 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2247 buf, l, is_write);
2248 len -= l;
2249 buf += l;
2250 addr += l;
2251 }
2252 return 0;
2253 }
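/* Illustrative sketch, not part of the original file: debuggers such as the
   gdb stub read guest virtual memory through cpu_memory_rw_debug(), which
   translates each page with cpu_get_phys_page_debug().  The helper name is
   hypothetical. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* is_write = 0; returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif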
2254
2255 void dump_exec_info(FILE *f,
2256 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2257 {
2258 int i, target_code_size, max_target_code_size;
2259 int direct_jmp_count, direct_jmp2_count, cross_page;
2260 TranslationBlock *tb;
2261
2262 target_code_size = 0;
2263 max_target_code_size = 0;
2264 cross_page = 0;
2265 direct_jmp_count = 0;
2266 direct_jmp2_count = 0;
2267 for(i = 0; i < nb_tbs; i++) {
2268 tb = &tbs[i];
2269 target_code_size += tb->size;
2270 if (tb->size > max_target_code_size)
2271 max_target_code_size = tb->size;
2272 if (tb->page_addr[1] != -1)
2273 cross_page++;
2274 if (tb->tb_next_offset[0] != 0xffff) {
2275 direct_jmp_count++;
2276 if (tb->tb_next_offset[1] != 0xffff) {
2277 direct_jmp2_count++;
2278 }
2279 }
2280 }
2281 /* XXX: avoid using doubles ? */
2282 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2283 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2284 nb_tbs ? target_code_size / nb_tbs : 0,
2285 max_target_code_size);
2286 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2287 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2288 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2289 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2290 cross_page,
2291 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2292 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2293 direct_jmp_count,
2294 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2295 direct_jmp2_count,
2296 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2297 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2298 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2299 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2300 }
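/* Illustrative sketch, not part of the original file: dump_exec_info() only
   needs a FILE handle and an fprintf-like callback, so it can be pointed at
   stderr from an ad-hoc debug hook.  The wrapper name is hypothetical. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif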
2301
2302 #if !defined(CONFIG_USER_ONLY)
2303
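/* The includes below expand softmmu_template.h once per access size
   (SHIFT 0/1/2/3 = 8/16/32/64 bit) to generate the slow-path helpers for
   code fetches: SOFTMMU_CODE_ACCESS selects the code-access variant and
   MMUSUFFIX gives them the _cmmu suffix.  GETPC() is NULL because these
   helpers are not called from generated code, so there is no host return
   address to restore CPU state from, and "env" is aliased to the global
   cpu_single_env that the template expects. */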
2304 #define MMUSUFFIX _cmmu
2305 #define GETPC() NULL
2306 #define env cpu_single_env
2307 #define SOFTMMU_CODE_ACCESS
2308
2309 #define SHIFT 0
2310 #include "softmmu_template.h"
2311
2312 #define SHIFT 1
2313 #include "softmmu_template.h"
2314
2315 #define SHIFT 2
2316 #include "softmmu_template.h"
2317
2318 #define SHIFT 3
2319 #include "softmmu_template.h"
2320
2321 #undef env
2322
2323 #endif