git.proxmox.com Git - qemu.git/blob - exec.c
Add unassigned memory debugging code.
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37 #if defined(CONFIG_USER_ONLY)
38 #include <qemu.h>
39 #endif
40
41 //#define DEBUG_TB_INVALIDATE
42 //#define DEBUG_FLUSH
43 //#define DEBUG_TLB
44 //#define DEBUG_UNASSIGNED
45
46 /* make various TB consistency checks */
47 //#define DEBUG_TB_CHECK
48 //#define DEBUG_TLB_CHECK
49
50 #if !defined(CONFIG_USER_ONLY)
51 /* TB consistency checks only implemented for usermode emulation. */
52 #undef DEBUG_TB_CHECK
53 #endif
54
55 /* threshold to flush the translated code buffer */
56 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
57
58 #define SMC_BITMAP_USE_THRESHOLD 10
59
60 #define MMAP_AREA_START 0x00000000
61 #define MMAP_AREA_END 0xa8000000
62
63 #if defined(TARGET_SPARC64)
64 #define TARGET_PHYS_ADDR_SPACE_BITS 41
65 #elif defined(TARGET_PPC64)
66 #define TARGET_PHYS_ADDR_SPACE_BITS 42
67 #else
68 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
69 #define TARGET_PHYS_ADDR_SPACE_BITS 32
70 #endif
71
72 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
73 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
74 int nb_tbs;
75 /* any access to the tbs or the page table must use this lock */
76 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
77
78 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
79 uint8_t *code_gen_ptr;
80
81 int phys_ram_size;
82 int phys_ram_fd;
83 uint8_t *phys_ram_base;
84 uint8_t *phys_ram_dirty;
85
86 CPUState *first_cpu;
87 /* current CPU in the current thread. It is only valid inside
88 cpu_exec() */
89 CPUState *cpu_single_env;
90
91 typedef struct PageDesc {
92 /* list of TBs intersecting this ram page */
93 TranslationBlock *first_tb;
94 /* in order to optimize self modifying code, we count the number
95 of write accesses to a given page so that we can switch to a bitmap */
96 unsigned int code_write_count;
97 uint8_t *code_bitmap;
98 #if defined(CONFIG_USER_ONLY)
99 unsigned long flags;
100 #endif
101 } PageDesc;
102
103 typedef struct PhysPageDesc {
104 /* offset in host memory of the page + io_index in the low 12 bits */
105 uint32_t phys_offset;
106 } PhysPageDesc;
107
108 #define L2_BITS 10
109 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
110
111 #define L1_SIZE (1 << L1_BITS)
112 #define L2_SIZE (1 << L2_BITS)
113
114 static void io_mem_init(void);
115
116 unsigned long qemu_real_host_page_size;
117 unsigned long qemu_host_page_bits;
118 unsigned long qemu_host_page_size;
119 unsigned long qemu_host_page_mask;
120
121 /* XXX: for system emulation, it could just be an array */
122 static PageDesc *l1_map[L1_SIZE];
123 PhysPageDesc **l1_phys_map;
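/* Illustrative sketch, not part of the original file: how page_find() and
   page_find_alloc() below split a 32-bit target virtual address across the
   two-level l1_map declared above (l1_phys_map uses the same scheme for
   physical page indices, with one extra level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32).  The helper name and printf output
   are hypothetical. */
static void example_split_target_address(unsigned long vaddr)
{
    unsigned int page_index = vaddr >> TARGET_PAGE_BITS;
    unsigned int l1_index = page_index >> L2_BITS;       /* index into l1_map[] */
    unsigned int l2_index = page_index & (L2_SIZE - 1);  /* index into the PageDesc array */

    printf("vaddr=0x%08lx -> l1=%u l2=%u page_offset=0x%lx\n",
           vaddr, l1_index, l2_index,
           vaddr & ((1ul << TARGET_PAGE_BITS) - 1));
}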
124
125 /* io memory support */
126 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
127 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
128 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
129 static int io_mem_nb;
130
131 /* log support */
132 char *logfilename = "/tmp/qemu.log";
133 FILE *logfile;
134 int loglevel;
135
136 /* statistics */
137 static int tlb_flush_count;
138 static int tb_flush_count;
139 static int tb_phys_invalidate_count;
140
141 static void page_init(void)
142 {
143 /* NOTE: we can always suppose that qemu_host_page_size >=
144 TARGET_PAGE_SIZE */
145 #ifdef _WIN32
146 {
147 SYSTEM_INFO system_info;
148 DWORD old_protect;
149
150 GetSystemInfo(&system_info);
151 qemu_real_host_page_size = system_info.dwPageSize;
152
153 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
154 PAGE_EXECUTE_READWRITE, &old_protect);
155 }
156 #else
157 qemu_real_host_page_size = getpagesize();
158 {
159 unsigned long start, end;
160
161 start = (unsigned long)code_gen_buffer;
162 start &= ~(qemu_real_host_page_size - 1);
163
164 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
165 end += qemu_real_host_page_size - 1;
166 end &= ~(qemu_real_host_page_size - 1);
167
168 mprotect((void *)start, end - start,
169 PROT_READ | PROT_WRITE | PROT_EXEC);
170 }
171 #endif
172
173 if (qemu_host_page_size == 0)
174 qemu_host_page_size = qemu_real_host_page_size;
175 if (qemu_host_page_size < TARGET_PAGE_SIZE)
176 qemu_host_page_size = TARGET_PAGE_SIZE;
177 qemu_host_page_bits = 0;
178 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
179 qemu_host_page_bits++;
180 qemu_host_page_mask = ~(qemu_host_page_size - 1);
181 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
182 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
183 }
184
185 static inline PageDesc *page_find_alloc(unsigned int index)
186 {
187 PageDesc **lp, *p;
188
189 lp = &l1_map[index >> L2_BITS];
190 p = *lp;
191 if (!p) {
192 /* allocate if not found */
193 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
194 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
195 *lp = p;
196 }
197 return p + (index & (L2_SIZE - 1));
198 }
199
200 static inline PageDesc *page_find(unsigned int index)
201 {
202 PageDesc *p;
203
204 p = l1_map[index >> L2_BITS];
205 if (!p)
206 return 0;
207 return p + (index & (L2_SIZE - 1));
208 }
209
210 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
211 {
212 void **lp, **p;
213 PhysPageDesc *pd;
214
215 p = (void **)l1_phys_map;
216 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
217
218 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
219 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
220 #endif
221 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
222 p = *lp;
223 if (!p) {
224 /* allocate if not found */
225 if (!alloc)
226 return NULL;
227 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
228 memset(p, 0, sizeof(void *) * L1_SIZE);
229 *lp = p;
230 }
231 #endif
232 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
233 pd = *lp;
234 if (!pd) {
235 int i;
236 /* allocate if not found */
237 if (!alloc)
238 return NULL;
239 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
240 *lp = pd;
241 for (i = 0; i < L2_SIZE; i++)
242 pd[i].phys_offset = IO_MEM_UNASSIGNED;
243 }
244 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
245 }
246
247 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
248 {
249 return phys_page_find_alloc(index, 0);
250 }
251
252 #if !defined(CONFIG_USER_ONLY)
253 static void tlb_protect_code(ram_addr_t ram_addr);
254 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
255 target_ulong vaddr);
256 #endif
257
258 void cpu_exec_init(CPUState *env)
259 {
260 CPUState **penv;
261 int cpu_index;
262
263 if (!code_gen_ptr) {
264 code_gen_ptr = code_gen_buffer;
265 page_init();
266 io_mem_init();
267 }
268 env->next_cpu = NULL;
269 penv = &first_cpu;
270 cpu_index = 0;
271 while (*penv != NULL) {
272 penv = (CPUState **)&(*penv)->next_cpu;
273 cpu_index++;
274 }
275 env->cpu_index = cpu_index;
276 *penv = env;
277 }
278
279 static inline void invalidate_page_bitmap(PageDesc *p)
280 {
281 if (p->code_bitmap) {
282 qemu_free(p->code_bitmap);
283 p->code_bitmap = NULL;
284 }
285 p->code_write_count = 0;
286 }
287
288 /* set to NULL all the 'first_tb' fields in all PageDescs */
289 static void page_flush_tb(void)
290 {
291 int i, j;
292 PageDesc *p;
293
294 for(i = 0; i < L1_SIZE; i++) {
295 p = l1_map[i];
296 if (p) {
297 for(j = 0; j < L2_SIZE; j++) {
298 p->first_tb = NULL;
299 invalidate_page_bitmap(p);
300 p++;
301 }
302 }
303 }
304 }
305
306 /* flush all the translation blocks */
307 /* XXX: tb_flush is currently not thread safe */
308 void tb_flush(CPUState *env1)
309 {
310 CPUState *env;
311 #if defined(DEBUG_FLUSH)
312 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
313 code_gen_ptr - code_gen_buffer,
314 nb_tbs,
315 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
316 #endif
317 nb_tbs = 0;
318
319 for(env = first_cpu; env != NULL; env = env->next_cpu) {
320 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
321 }
322
323 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
324 page_flush_tb();
325
326 code_gen_ptr = code_gen_buffer;
327 /* XXX: flush processor icache at this point if cache flush is
328 expensive */
329 tb_flush_count++;
330 }
331
332 #ifdef DEBUG_TB_CHECK
333
334 static void tb_invalidate_check(unsigned long address)
335 {
336 TranslationBlock *tb;
337 int i;
338 address &= TARGET_PAGE_MASK;
339 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
340 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
341 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
342 address >= tb->pc + tb->size)) {
343 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
344 address, (long)tb->pc, tb->size);
345 }
346 }
347 }
348 }
349
350 /* verify that all the pages have correct rights for code */
351 static void tb_page_check(void)
352 {
353 TranslationBlock *tb;
354 int i, flags1, flags2;
355
356 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
357 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
358 flags1 = page_get_flags(tb->pc);
359 flags2 = page_get_flags(tb->pc + tb->size - 1);
360 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
361 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
362 (long)tb->pc, tb->size, flags1, flags2);
363 }
364 }
365 }
366 }
367
368 void tb_jmp_check(TranslationBlock *tb)
369 {
370 TranslationBlock *tb1;
371 unsigned int n1;
372
373 /* suppress any remaining jumps to this TB */
374 tb1 = tb->jmp_first;
375 for(;;) {
376 n1 = (long)tb1 & 3;
377 tb1 = (TranslationBlock *)((long)tb1 & ~3);
378 if (n1 == 2)
379 break;
380 tb1 = tb1->jmp_next[n1];
381 }
382 /* check end of list */
383 if (tb1 != tb) {
384 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
385 }
386 }
387
388 #endif
389
390 /* invalidate one TB */
391 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
392 int next_offset)
393 {
394 TranslationBlock *tb1;
395 for(;;) {
396 tb1 = *ptb;
397 if (tb1 == tb) {
398 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
399 break;
400 }
401 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
402 }
403 }
404
405 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
406 {
407 TranslationBlock *tb1;
408 unsigned int n1;
409
410 for(;;) {
411 tb1 = *ptb;
412 n1 = (long)tb1 & 3;
413 tb1 = (TranslationBlock *)((long)tb1 & ~3);
414 if (tb1 == tb) {
415 *ptb = tb1->page_next[n1];
416 break;
417 }
418 ptb = &tb1->page_next[n1];
419 }
420 }
421
422 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
423 {
424 TranslationBlock *tb1, **ptb;
425 unsigned int n1;
426
427 ptb = &tb->jmp_next[n];
428 tb1 = *ptb;
429 if (tb1) {
430 /* find tb(n) in circular list */
431 for(;;) {
432 tb1 = *ptb;
433 n1 = (long)tb1 & 3;
434 tb1 = (TranslationBlock *)((long)tb1 & ~3);
435 if (n1 == n && tb1 == tb)
436 break;
437 if (n1 == 2) {
438 ptb = &tb1->jmp_first;
439 } else {
440 ptb = &tb1->jmp_next[n1];
441 }
442 }
443 /* now we can suppress tb(n) from the list */
444 *ptb = tb->jmp_next[n];
445
446 tb->jmp_next[n] = NULL;
447 }
448 }
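/* Illustrative sketch, not part of the original file: the page_next[] and
   jmp_next[]/jmp_first lists manipulated above store a slot number in the
   two low bits of the TranslationBlock pointer; a tag of 0 or 1 names the
   jump/page slot of the pointed-to TB, and a tag of 2 marks the owning TB,
   i.e. the end of the circular jump list (see tb_link_phys(), which sets
   jmp_first = tb | 2).  These helper names are hypothetical. */
static inline TranslationBlock *example_untag_tb(TranslationBlock *tagged)
{
    return (TranslationBlock *)((long)tagged & ~3);  /* strip the 2-bit tag */
}

static inline unsigned int example_tb_tag(TranslationBlock *tagged)
{
    return (long)tagged & 3;  /* 0 or 1: slot number, 2: end of list */
}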
449
450 /* reset the jump entry 'n' of a TB so that it is not chained to
451 another TB */
452 static inline void tb_reset_jump(TranslationBlock *tb, int n)
453 {
454 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
455 }
456
457 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
458 {
459 CPUState *env;
460 PageDesc *p;
461 unsigned int h, n1;
462 target_ulong phys_pc;
463 TranslationBlock *tb1, *tb2;
464
465 /* remove the TB from the hash list */
466 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
467 h = tb_phys_hash_func(phys_pc);
468 tb_remove(&tb_phys_hash[h], tb,
469 offsetof(TranslationBlock, phys_hash_next));
470
471 /* remove the TB from the page list */
472 if (tb->page_addr[0] != page_addr) {
473 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
474 tb_page_remove(&p->first_tb, tb);
475 invalidate_page_bitmap(p);
476 }
477 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
478 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
479 tb_page_remove(&p->first_tb, tb);
480 invalidate_page_bitmap(p);
481 }
482
483 tb_invalidated_flag = 1;
484
485 /* remove the TB from the hash list */
486 h = tb_jmp_cache_hash_func(tb->pc);
487 for(env = first_cpu; env != NULL; env = env->next_cpu) {
488 if (env->tb_jmp_cache[h] == tb)
489 env->tb_jmp_cache[h] = NULL;
490 }
491
492 /* suppress this TB from the two jump lists */
493 tb_jmp_remove(tb, 0);
494 tb_jmp_remove(tb, 1);
495
496 /* suppress any remaining jumps to this TB */
497 tb1 = tb->jmp_first;
498 for(;;) {
499 n1 = (long)tb1 & 3;
500 if (n1 == 2)
501 break;
502 tb1 = (TranslationBlock *)((long)tb1 & ~3);
503 tb2 = tb1->jmp_next[n1];
504 tb_reset_jump(tb1, n1);
505 tb1->jmp_next[n1] = NULL;
506 tb1 = tb2;
507 }
508 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
509
510 tb_phys_invalidate_count++;
511 }
512
513 static inline void set_bits(uint8_t *tab, int start, int len)
514 {
515 int end, mask, end1;
516
517 end = start + len;
518 tab += start >> 3;
519 mask = 0xff << (start & 7);
520 if ((start & ~7) == (end & ~7)) {
521 if (start < end) {
522 mask &= ~(0xff << (end & 7));
523 *tab |= mask;
524 }
525 } else {
526 *tab++ |= mask;
527 start = (start + 8) & ~7;
528 end1 = end & ~7;
529 while (start < end1) {
530 *tab++ = 0xff;
531 start += 8;
532 }
533 if (start < end) {
534 mask = ~(0xff << (end & 7));
535 *tab |= mask;
536 }
537 }
538 }
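/* Illustrative sketch, not part of the original file: set_bits() above marks
   a run of bits in a byte array, LSB first within each byte, and is used by
   build_page_bitmap() below to record which bytes of a page are covered by
   translated code.  The helper name is hypothetical. */
static void example_set_bits_usage(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 5, 10);   /* mark bits 5..14 */
    /* result: bitmap[0] == 0xe0 (bits 5..7), bitmap[1] == 0x7f (bits 8..14) */
}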
539
540 static void build_page_bitmap(PageDesc *p)
541 {
542 int n, tb_start, tb_end;
543 TranslationBlock *tb;
544
545 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
546 if (!p->code_bitmap)
547 return;
548 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
549
550 tb = p->first_tb;
551 while (tb != NULL) {
552 n = (long)tb & 3;
553 tb = (TranslationBlock *)((long)tb & ~3);
554 /* NOTE: this is subtle as a TB may span two physical pages */
555 if (n == 0) {
556 /* NOTE: tb_end may be after the end of the page, but
557 it is not a problem */
558 tb_start = tb->pc & ~TARGET_PAGE_MASK;
559 tb_end = tb_start + tb->size;
560 if (tb_end > TARGET_PAGE_SIZE)
561 tb_end = TARGET_PAGE_SIZE;
562 } else {
563 tb_start = 0;
564 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
565 }
566 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
567 tb = tb->page_next[n];
568 }
569 }
570
571 #ifdef TARGET_HAS_PRECISE_SMC
572
573 static void tb_gen_code(CPUState *env,
574 target_ulong pc, target_ulong cs_base, int flags,
575 int cflags)
576 {
577 TranslationBlock *tb;
578 uint8_t *tc_ptr;
579 target_ulong phys_pc, phys_page2, virt_page2;
580 int code_gen_size;
581
582 phys_pc = get_phys_addr_code(env, pc);
583 tb = tb_alloc(pc);
584 if (!tb) {
585 /* flush must be done */
586 tb_flush(env);
587 /* cannot fail at this point */
588 tb = tb_alloc(pc);
589 }
590 tc_ptr = code_gen_ptr;
591 tb->tc_ptr = tc_ptr;
592 tb->cs_base = cs_base;
593 tb->flags = flags;
594 tb->cflags = cflags;
595 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
596 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
597
598 /* check next page if needed */
599 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
600 phys_page2 = -1;
601 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
602 phys_page2 = get_phys_addr_code(env, virt_page2);
603 }
604 tb_link_phys(tb, phys_pc, phys_page2);
605 }
606 #endif
607
608 /* invalidate all TBs which intersect with the target physical page
609 starting in range [start;end[. NOTE: start and end must refer to
610 the same physical page. 'is_cpu_write_access' should be true if called
611 from a real cpu write access: the virtual CPU will exit the current
612 TB if code is modified inside this TB. */
613 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
614 int is_cpu_write_access)
615 {
616 int n, current_tb_modified, current_tb_not_found, current_flags;
617 CPUState *env = cpu_single_env;
618 PageDesc *p;
619 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
620 target_ulong tb_start, tb_end;
621 target_ulong current_pc, current_cs_base;
622
623 p = page_find(start >> TARGET_PAGE_BITS);
624 if (!p)
625 return;
626 if (!p->code_bitmap &&
627 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
628 is_cpu_write_access) {
629 /* build code bitmap */
630 build_page_bitmap(p);
631 }
632
633 /* we remove all the TBs in the range [start, end[ */
634 /* XXX: see if in some cases it could be faster to invalidate all the code */
635 current_tb_not_found = is_cpu_write_access;
636 current_tb_modified = 0;
637 current_tb = NULL; /* avoid warning */
638 current_pc = 0; /* avoid warning */
639 current_cs_base = 0; /* avoid warning */
640 current_flags = 0; /* avoid warning */
641 tb = p->first_tb;
642 while (tb != NULL) {
643 n = (long)tb & 3;
644 tb = (TranslationBlock *)((long)tb & ~3);
645 tb_next = tb->page_next[n];
646 /* NOTE: this is subtle as a TB may span two physical pages */
647 if (n == 0) {
648 /* NOTE: tb_end may be after the end of the page, but
649 it is not a problem */
650 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
651 tb_end = tb_start + tb->size;
652 } else {
653 tb_start = tb->page_addr[1];
654 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
655 }
656 if (!(tb_end <= start || tb_start >= end)) {
657 #ifdef TARGET_HAS_PRECISE_SMC
658 if (current_tb_not_found) {
659 current_tb_not_found = 0;
660 current_tb = NULL;
661 if (env->mem_write_pc) {
662 /* now we have a real cpu fault */
663 current_tb = tb_find_pc(env->mem_write_pc);
664 }
665 }
666 if (current_tb == tb &&
667 !(current_tb->cflags & CF_SINGLE_INSN)) {
668 /* If we are modifying the current TB, we must stop
669 its execution. We could be more precise by checking
670 that the modification is after the current PC, but it
671 would require a specialized function to partially
672 restore the CPU state */
673
674 current_tb_modified = 1;
675 cpu_restore_state(current_tb, env,
676 env->mem_write_pc, NULL);
677 #if defined(TARGET_I386)
678 current_flags = env->hflags;
679 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
680 current_cs_base = (target_ulong)env->segs[R_CS].base;
681 current_pc = current_cs_base + env->eip;
682 #else
683 #error unsupported CPU
684 #endif
685 }
686 #endif /* TARGET_HAS_PRECISE_SMC */
687 /* we need to do that to handle the case where a signal
688 occurs while doing tb_phys_invalidate() */
689 saved_tb = NULL;
690 if (env) {
691 saved_tb = env->current_tb;
692 env->current_tb = NULL;
693 }
694 tb_phys_invalidate(tb, -1);
695 if (env) {
696 env->current_tb = saved_tb;
697 if (env->interrupt_request && env->current_tb)
698 cpu_interrupt(env, env->interrupt_request);
699 }
700 }
701 tb = tb_next;
702 }
703 #if !defined(CONFIG_USER_ONLY)
704 /* if no code remaining, no need to continue to use slow writes */
705 if (!p->first_tb) {
706 invalidate_page_bitmap(p);
707 if (is_cpu_write_access) {
708 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
709 }
710 }
711 #endif
712 #ifdef TARGET_HAS_PRECISE_SMC
713 if (current_tb_modified) {
714 /* we generate a block containing just the instruction
715 modifying the memory. It will ensure that it cannot modify
716 itself */
717 env->current_tb = NULL;
718 tb_gen_code(env, current_pc, current_cs_base, current_flags,
719 CF_SINGLE_INSN);
720 cpu_resume_from_signal(env, NULL);
721 }
722 #endif
723 }
724
725 /* len must be <= 8 and start must be a multiple of len */
726 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
727 {
728 PageDesc *p;
729 int offset, b;
730 #if 0
731 if (1) {
732 if (loglevel) {
733 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
734 cpu_single_env->mem_write_vaddr, len,
735 cpu_single_env->eip,
736 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
737 }
738 }
739 #endif
740 p = page_find(start >> TARGET_PAGE_BITS);
741 if (!p)
742 return;
743 if (p->code_bitmap) {
744 offset = start & ~TARGET_PAGE_MASK;
745 b = p->code_bitmap[offset >> 3] >> (offset & 7);
746 if (b & ((1 << len) - 1))
747 goto do_invalidate;
748 } else {
749 do_invalidate:
750 tb_invalidate_phys_page_range(start, start + len, 1);
751 }
752 }
753
754 #if !defined(CONFIG_SOFTMMU)
755 static void tb_invalidate_phys_page(target_ulong addr,
756 unsigned long pc, void *puc)
757 {
758 int n, current_flags, current_tb_modified;
759 target_ulong current_pc, current_cs_base;
760 PageDesc *p;
761 TranslationBlock *tb, *current_tb;
762 #ifdef TARGET_HAS_PRECISE_SMC
763 CPUState *env = cpu_single_env;
764 #endif
765
766 addr &= TARGET_PAGE_MASK;
767 p = page_find(addr >> TARGET_PAGE_BITS);
768 if (!p)
769 return;
770 tb = p->first_tb;
771 current_tb_modified = 0;
772 current_tb = NULL;
773 current_pc = 0; /* avoid warning */
774 current_cs_base = 0; /* avoid warning */
775 current_flags = 0; /* avoid warning */
776 #ifdef TARGET_HAS_PRECISE_SMC
777 if (tb && pc != 0) {
778 current_tb = tb_find_pc(pc);
779 }
780 #endif
781 while (tb != NULL) {
782 n = (long)tb & 3;
783 tb = (TranslationBlock *)((long)tb & ~3);
784 #ifdef TARGET_HAS_PRECISE_SMC
785 if (current_tb == tb &&
786 !(current_tb->cflags & CF_SINGLE_INSN)) {
787 /* If we are modifying the current TB, we must stop
788 its execution. We could be more precise by checking
789 that the modification is after the current PC, but it
790 would require a specialized function to partially
791 restore the CPU state */
792
793 current_tb_modified = 1;
794 cpu_restore_state(current_tb, env, pc, puc);
795 #if defined(TARGET_I386)
796 current_flags = env->hflags;
797 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
798 current_cs_base = (target_ulong)env->segs[R_CS].base;
799 current_pc = current_cs_base + env->eip;
800 #else
801 #error unsupported CPU
802 #endif
803 }
804 #endif /* TARGET_HAS_PRECISE_SMC */
805 tb_phys_invalidate(tb, addr);
806 tb = tb->page_next[n];
807 }
808 p->first_tb = NULL;
809 #ifdef TARGET_HAS_PRECISE_SMC
810 if (current_tb_modified) {
811 /* we generate a block containing just the instruction
812 modifying the memory. It will ensure that it cannot modify
813 itself */
814 env->current_tb = NULL;
815 tb_gen_code(env, current_pc, current_cs_base, current_flags,
816 CF_SINGLE_INSN);
817 cpu_resume_from_signal(env, puc);
818 }
819 #endif
820 }
821 #endif
822
823 /* add the tb in the target page and protect it if necessary */
824 static inline void tb_alloc_page(TranslationBlock *tb,
825 unsigned int n, target_ulong page_addr)
826 {
827 PageDesc *p;
828 TranslationBlock *last_first_tb;
829
830 tb->page_addr[n] = page_addr;
831 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
832 tb->page_next[n] = p->first_tb;
833 last_first_tb = p->first_tb;
834 p->first_tb = (TranslationBlock *)((long)tb | n);
835 invalidate_page_bitmap(p);
836
837 #if defined(TARGET_HAS_SMC) || 1
838
839 #if defined(CONFIG_USER_ONLY)
840 if (p->flags & PAGE_WRITE) {
841 target_ulong addr;
842 PageDesc *p2;
843 int prot;
844
845 /* force the host page as non writable (writes will have a
846 page fault + mprotect overhead) */
847 page_addr &= qemu_host_page_mask;
848 prot = 0;
849 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
850 addr += TARGET_PAGE_SIZE) {
851
852 p2 = page_find (addr >> TARGET_PAGE_BITS);
853 if (!p2)
854 continue;
855 prot |= p2->flags;
856 p2->flags &= ~PAGE_WRITE;
857 page_get_flags(addr);
858 }
859 mprotect(g2h(page_addr), qemu_host_page_size,
860 (prot & PAGE_BITS) & ~PAGE_WRITE);
861 #ifdef DEBUG_TB_INVALIDATE
862 printf("protecting code page: 0x%08lx\n",
863 page_addr);
864 #endif
865 }
866 #else
867 /* if some code is already present, then the pages are already
868 protected. So we handle the case where only the first TB is
869 allocated in a physical page */
870 if (!last_first_tb) {
871 tlb_protect_code(page_addr);
872 }
873 #endif
874
875 #endif /* TARGET_HAS_SMC */
876 }
877
878 /* Allocate a new translation block. Flush the translation buffer if
879 too many translation blocks or too much generated code. */
880 TranslationBlock *tb_alloc(target_ulong pc)
881 {
882 TranslationBlock *tb;
883
884 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
885 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
886 return NULL;
887 tb = &tbs[nb_tbs++];
888 tb->pc = pc;
889 tb->cflags = 0;
890 return tb;
891 }
892
893 /* add a new TB and link it to the physical page tables. phys_page2 is
894 (-1) to indicate that only one page contains the TB. */
895 void tb_link_phys(TranslationBlock *tb,
896 target_ulong phys_pc, target_ulong phys_page2)
897 {
898 unsigned int h;
899 TranslationBlock **ptb;
900
901 /* add in the physical hash table */
902 h = tb_phys_hash_func(phys_pc);
903 ptb = &tb_phys_hash[h];
904 tb->phys_hash_next = *ptb;
905 *ptb = tb;
906
907 /* add in the page list */
908 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
909 if (phys_page2 != -1)
910 tb_alloc_page(tb, 1, phys_page2);
911 else
912 tb->page_addr[1] = -1;
913
914 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
915 tb->jmp_next[0] = NULL;
916 tb->jmp_next[1] = NULL;
917 #ifdef USE_CODE_COPY
918 tb->cflags &= ~CF_FP_USED;
919 if (tb->cflags & CF_TB_FP_USED)
920 tb->cflags |= CF_FP_USED;
921 #endif
922
923 /* init original jump addresses */
924 if (tb->tb_next_offset[0] != 0xffff)
925 tb_reset_jump(tb, 0);
926 if (tb->tb_next_offset[1] != 0xffff)
927 tb_reset_jump(tb, 1);
928
929 #ifdef DEBUG_TB_CHECK
930 tb_page_check();
931 #endif
932 }
933
934 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
935 tb[1].tc_ptr. Return NULL if not found */
936 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
937 {
938 int m_min, m_max, m;
939 unsigned long v;
940 TranslationBlock *tb;
941
942 if (nb_tbs <= 0)
943 return NULL;
944 if (tc_ptr < (unsigned long)code_gen_buffer ||
945 tc_ptr >= (unsigned long)code_gen_ptr)
946 return NULL;
947 /* binary search (cf Knuth) */
948 m_min = 0;
949 m_max = nb_tbs - 1;
950 while (m_min <= m_max) {
951 m = (m_min + m_max) >> 1;
952 tb = &tbs[m];
953 v = (unsigned long)tb->tc_ptr;
954 if (v == tc_ptr)
955 return tb;
956 else if (tc_ptr < v) {
957 m_max = m - 1;
958 } else {
959 m_min = m + 1;
960 }
961 }
962 return &tbs[m_max];
963 }
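/* Illustrative sketch, not part of the original file: tb_find_pc() above is
   the binary search used when a host code pointer (for instance the faulting
   PC captured in a signal handler) must be mapped back to the
   TranslationBlock whose generated code contains it.  The caller below is
   hypothetical. */
static void example_tb_find_pc_usage(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* tb is the block with the largest tc_ptr <= host_pc; tb->pc is the
           target PC of the block's first instruction */
        printf("host pc 0x%08lx belongs to TB for target pc " TARGET_FMT_lx "\n",
               host_pc, tb->pc);
    }
}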
964
965 static void tb_reset_jump_recursive(TranslationBlock *tb);
966
967 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
968 {
969 TranslationBlock *tb1, *tb_next, **ptb;
970 unsigned int n1;
971
972 tb1 = tb->jmp_next[n];
973 if (tb1 != NULL) {
974 /* find head of list */
975 for(;;) {
976 n1 = (long)tb1 & 3;
977 tb1 = (TranslationBlock *)((long)tb1 & ~3);
978 if (n1 == 2)
979 break;
980 tb1 = tb1->jmp_next[n1];
981 }
982 /* we are now sure that tb jumps to tb1 */
983 tb_next = tb1;
984
985 /* remove tb from the jmp_first list */
986 ptb = &tb_next->jmp_first;
987 for(;;) {
988 tb1 = *ptb;
989 n1 = (long)tb1 & 3;
990 tb1 = (TranslationBlock *)((long)tb1 & ~3);
991 if (n1 == n && tb1 == tb)
992 break;
993 ptb = &tb1->jmp_next[n1];
994 }
995 *ptb = tb->jmp_next[n];
996 tb->jmp_next[n] = NULL;
997
998 /* suppress the jump to next tb in generated code */
999 tb_reset_jump(tb, n);
1000
1001 /* suppress jumps in the tb on which we could have jumped */
1002 tb_reset_jump_recursive(tb_next);
1003 }
1004 }
1005
1006 static void tb_reset_jump_recursive(TranslationBlock *tb)
1007 {
1008 tb_reset_jump_recursive2(tb, 0);
1009 tb_reset_jump_recursive2(tb, 1);
1010 }
1011
1012 #if defined(TARGET_HAS_ICE)
1013 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1014 {
1015 target_ulong addr, pd;
1016 ram_addr_t ram_addr;
1017 PhysPageDesc *p;
1018
1019 addr = cpu_get_phys_page_debug(env, pc);
1020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1021 if (!p) {
1022 pd = IO_MEM_UNASSIGNED;
1023 } else {
1024 pd = p->phys_offset;
1025 }
1026 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1027 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1028 }
1029 #endif
1030
1031 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1032 breakpoint is reached */
1033 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1034 {
1035 #if defined(TARGET_HAS_ICE)
1036 int i;
1037
1038 for(i = 0; i < env->nb_breakpoints; i++) {
1039 if (env->breakpoints[i] == pc)
1040 return 0;
1041 }
1042
1043 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1044 return -1;
1045 env->breakpoints[env->nb_breakpoints++] = pc;
1046
1047 breakpoint_invalidate(env, pc);
1048 return 0;
1049 #else
1050 return -1;
1051 #endif
1052 }
1053
1054 /* remove a breakpoint */
1055 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1056 {
1057 #if defined(TARGET_HAS_ICE)
1058 int i;
1059 for(i = 0; i < env->nb_breakpoints; i++) {
1060 if (env->breakpoints[i] == pc)
1061 goto found;
1062 }
1063 return -1;
1064 found:
1065 env->nb_breakpoints--;
1066 if (i < env->nb_breakpoints)
1067 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1068
1069 breakpoint_invalidate(env, pc);
1070 return 0;
1071 #else
1072 return -1;
1073 #endif
1074 }
1075
1076 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1077 CPU loop after each instruction */
1078 void cpu_single_step(CPUState *env, int enabled)
1079 {
1080 #if defined(TARGET_HAS_ICE)
1081 if (env->singlestep_enabled != enabled) {
1082 env->singlestep_enabled = enabled;
1083 /* must flush all the translated code to avoid inconsistencies */
1084 /* XXX: only flush what is necessary */
1085 tb_flush(env);
1086 }
1087 #endif
1088 }
1089
1090 /* enable or disable low level logging */
1091 void cpu_set_log(int log_flags)
1092 {
1093 loglevel = log_flags;
1094 if (loglevel && !logfile) {
1095 logfile = fopen(logfilename, "w");
1096 if (!logfile) {
1097 perror(logfilename);
1098 _exit(1);
1099 }
1100 #if !defined(CONFIG_SOFTMMU)
1101 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1102 {
1103 static uint8_t logfile_buf[4096];
1104 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1105 }
1106 #else
1107 setvbuf(logfile, NULL, _IOLBF, 0);
1108 #endif
1109 }
1110 }
1111
1112 void cpu_set_log_filename(const char *filename)
1113 {
1114 logfilename = strdup(filename);
1115 }
1116
1117 /* mask must never be zero, except for A20 change call */
1118 void cpu_interrupt(CPUState *env, int mask)
1119 {
1120 TranslationBlock *tb;
1121 static int interrupt_lock;
1122
1123 env->interrupt_request |= mask;
1124 /* if the cpu is currently executing code, we must unlink it and
1125 all the potentially executing TB */
1126 tb = env->current_tb;
1127 if (tb && !testandset(&interrupt_lock)) {
1128 env->current_tb = NULL;
1129 tb_reset_jump_recursive(tb);
1130 interrupt_lock = 0;
1131 }
1132 }
1133
1134 void cpu_reset_interrupt(CPUState *env, int mask)
1135 {
1136 env->interrupt_request &= ~mask;
1137 }
1138
1139 CPULogItem cpu_log_items[] = {
1140 { CPU_LOG_TB_OUT_ASM, "out_asm",
1141 "show generated host assembly code for each compiled TB" },
1142 { CPU_LOG_TB_IN_ASM, "in_asm",
1143 "show target assembly code for each compiled TB" },
1144 { CPU_LOG_TB_OP, "op",
1145 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1146 #ifdef TARGET_I386
1147 { CPU_LOG_TB_OP_OPT, "op_opt",
1148 "show micro ops after optimization for each compiled TB" },
1149 #endif
1150 { CPU_LOG_INT, "int",
1151 "show interrupts/exceptions in short format" },
1152 { CPU_LOG_EXEC, "exec",
1153 "show trace before each executed TB (lots of logs)" },
1154 { CPU_LOG_TB_CPU, "cpu",
1155 "show CPU state before bloc translation" },
1156 #ifdef TARGET_I386
1157 { CPU_LOG_PCALL, "pcall",
1158 "show protected mode far calls/returns/exceptions" },
1159 #endif
1160 #ifdef DEBUG_IOPORT
1161 { CPU_LOG_IOPORT, "ioport",
1162 "show all i/o ports accesses" },
1163 #endif
1164 { 0, NULL, NULL },
1165 };
1166
1167 static int cmp1(const char *s1, int n, const char *s2)
1168 {
1169 if (strlen(s2) != n)
1170 return 0;
1171 return memcmp(s1, s2, n) == 0;
1172 }
1173
1174 /* takes a comma separated list of log masks. Returns 0 on error. */
1175 int cpu_str_to_log_mask(const char *str)
1176 {
1177 CPULogItem *item;
1178 int mask;
1179 const char *p, *p1;
1180
1181 p = str;
1182 mask = 0;
1183 for(;;) {
1184 p1 = strchr(p, ',');
1185 if (!p1)
1186 p1 = p + strlen(p);
1187 if(cmp1(p,p1-p,"all")) {
1188 for(item = cpu_log_items; item->mask != 0; item++) {
1189 mask |= item->mask;
1190 }
1191 } else {
1192 for(item = cpu_log_items; item->mask != 0; item++) {
1193 if (cmp1(p, p1 - p, item->name))
1194 goto found;
1195 }
1196 return 0;
1197 }
1198 found:
1199 mask |= item->mask;
1200 if (*p1 != ',')
1201 break;
1202 p = p1 + 1;
1203 }
1204 return mask;
1205 }
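/* Illustrative sketch, not part of the original file: typical use of the log
   mask parser together with cpu_set_log() above, e.g. when handling a
   command line option such as "-d in_asm,op".  The helper name is
   hypothetical; cpu_str_to_log_mask() returns 0 when an unknown item is
   given. */
static void example_enable_logging(const char *arg)
{
    int mask;

    mask = cpu_str_to_log_mask(arg);
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);  /* opens the log file on first use */
}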
1206
1207 void cpu_abort(CPUState *env, const char *fmt, ...)
1208 {
1209 va_list ap;
1210
1211 va_start(ap, fmt);
1212 fprintf(stderr, "qemu: fatal: ");
1213 vfprintf(stderr, fmt, ap);
1214 fprintf(stderr, "\n");
1215 #ifdef TARGET_I386
1216 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1217 #else
1218 cpu_dump_state(env, stderr, fprintf, 0);
1219 #endif
1220 va_end(ap);
1221 abort();
1222 }
1223
1224 #if !defined(CONFIG_USER_ONLY)
1225
1226 /* NOTE: if flush_global is true, also flush global entries (not
1227 implemented yet) */
1228 void tlb_flush(CPUState *env, int flush_global)
1229 {
1230 int i;
1231
1232 #if defined(DEBUG_TLB)
1233 printf("tlb_flush:\n");
1234 #endif
1235 /* must reset current TB so that interrupts cannot modify the
1236 links while we are modifying them */
1237 env->current_tb = NULL;
1238
1239 for(i = 0; i < CPU_TLB_SIZE; i++) {
1240 env->tlb_table[0][i].addr_read = -1;
1241 env->tlb_table[0][i].addr_write = -1;
1242 env->tlb_table[0][i].addr_code = -1;
1243 env->tlb_table[1][i].addr_read = -1;
1244 env->tlb_table[1][i].addr_write = -1;
1245 env->tlb_table[1][i].addr_code = -1;
1246 }
1247
1248 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1249
1250 #if !defined(CONFIG_SOFTMMU)
1251 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1252 #endif
1253 #ifdef USE_KQEMU
1254 if (env->kqemu_enabled) {
1255 kqemu_flush(env, flush_global);
1256 }
1257 #endif
1258 tlb_flush_count++;
1259 }
1260
1261 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1262 {
1263 if (addr == (tlb_entry->addr_read &
1264 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1265 addr == (tlb_entry->addr_write &
1266 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1267 addr == (tlb_entry->addr_code &
1268 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1269 tlb_entry->addr_read = -1;
1270 tlb_entry->addr_write = -1;
1271 tlb_entry->addr_code = -1;
1272 }
1273 }
1274
1275 void tlb_flush_page(CPUState *env, target_ulong addr)
1276 {
1277 int i;
1278 TranslationBlock *tb;
1279
1280 #if defined(DEBUG_TLB)
1281 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1282 #endif
1283 /* must reset current TB so that interrupts cannot modify the
1284 links while we are modifying them */
1285 env->current_tb = NULL;
1286
1287 addr &= TARGET_PAGE_MASK;
1288 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1289 tlb_flush_entry(&env->tlb_table[0][i], addr);
1290 tlb_flush_entry(&env->tlb_table[1][i], addr);
1291
1292 /* Discard jump cache entries for any tb which might potentially
1293 overlap the flushed page. */
1294 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1295 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1296
1297 i = tb_jmp_cache_hash_page(addr);
1298 memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1299
1300 #if !defined(CONFIG_SOFTMMU)
1301 if (addr < MMAP_AREA_END)
1302 munmap((void *)addr, TARGET_PAGE_SIZE);
1303 #endif
1304 #ifdef USE_KQEMU
1305 if (env->kqemu_enabled) {
1306 kqemu_flush_page(env, addr);
1307 }
1308 #endif
1309 }
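/* Illustrative sketch, not part of the original file: each of the TLBs in
   env->tlb_table[] (indexed by is_user) is a direct-mapped table indexed by
   the low bits of the virtual page number, exactly as computed in
   tlb_flush_page() above and tlb_set_page_exec() below.  The helper name is
   hypothetical. */
static inline int example_tlb_index(target_ulong vaddr)
{
    return (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
}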
1310
1311 /* update the TLBs so that writes to code in the virtual page 'addr'
1312 can be detected */
1313 static void tlb_protect_code(ram_addr_t ram_addr)
1314 {
1315 cpu_physical_memory_reset_dirty(ram_addr,
1316 ram_addr + TARGET_PAGE_SIZE,
1317 CODE_DIRTY_FLAG);
1318 }
1319
1320 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1321 tested for self modifying code */
1322 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1323 target_ulong vaddr)
1324 {
1325 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1326 }
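/* Illustrative sketch, not part of the original file: phys_ram_dirty keeps
   one byte of dirty flags per target page.  CODE_DIRTY_FLAG is one bit of
   that byte and 0xff means "fully dirty" (no client needs write
   notifications any more); tlb_protect_code() and tlb_unprotect_code_phys()
   above only toggle the CODE_DIRTY_FLAG bit.  The helper name is
   hypothetical. */
static inline int example_page_has_translated_code(ram_addr_t ram_addr)
{
    /* when CODE_DIRTY_FLAG is clear, writes to the page must go through the
       notdirty handlers below so that stale translated code is invalidated */
    return !(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG);
}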
1327
1328 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1329 unsigned long start, unsigned long length)
1330 {
1331 unsigned long addr;
1332 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1333 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1334 if ((addr - start) < length) {
1335 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1336 }
1337 }
1338 }
1339
1340 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1341 int dirty_flags)
1342 {
1343 CPUState *env;
1344 unsigned long length, start1;
1345 int i, mask, len;
1346 uint8_t *p;
1347
1348 start &= TARGET_PAGE_MASK;
1349 end = TARGET_PAGE_ALIGN(end);
1350
1351 length = end - start;
1352 if (length == 0)
1353 return;
1354 len = length >> TARGET_PAGE_BITS;
1355 #ifdef USE_KQEMU
1356 /* XXX: should not depend on cpu context */
1357 env = first_cpu;
1358 if (env->kqemu_enabled) {
1359 ram_addr_t addr;
1360 addr = start;
1361 for(i = 0; i < len; i++) {
1362 kqemu_set_notdirty(env, addr);
1363 addr += TARGET_PAGE_SIZE;
1364 }
1365 }
1366 #endif
1367 mask = ~dirty_flags;
1368 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1369 for(i = 0; i < len; i++)
1370 p[i] &= mask;
1371
1372 /* we modify the TLB cache so that the dirty bit will be set again
1373 when accessing the range */
1374 start1 = start + (unsigned long)phys_ram_base;
1375 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1376 for(i = 0; i < CPU_TLB_SIZE; i++)
1377 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1378 for(i = 0; i < CPU_TLB_SIZE; i++)
1379 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1380 }
1381
1382 #if !defined(CONFIG_SOFTMMU)
1383 /* XXX: this is expensive */
1384 {
1385 VirtPageDesc *p;
1386 int j;
1387 target_ulong addr;
1388
1389 for(i = 0; i < L1_SIZE; i++) {
1390 p = l1_virt_map[i];
1391 if (p) {
1392 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1393 for(j = 0; j < L2_SIZE; j++) {
1394 if (p->valid_tag == virt_valid_tag &&
1395 p->phys_addr >= start && p->phys_addr < end &&
1396 (p->prot & PROT_WRITE)) {
1397 if (addr < MMAP_AREA_END) {
1398 mprotect((void *)addr, TARGET_PAGE_SIZE,
1399 p->prot & ~PROT_WRITE);
1400 }
1401 }
1402 addr += TARGET_PAGE_SIZE;
1403 p++;
1404 }
1405 }
1406 }
1407 }
1408 #endif
1409 }
1410
1411 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1412 {
1413 ram_addr_t ram_addr;
1414
1415 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1416 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1417 tlb_entry->addend - (unsigned long)phys_ram_base;
1418 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1419 tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1420 }
1421 }
1422 }
1423
1424 /* update the TLB according to the current state of the dirty bits */
1425 void cpu_tlb_update_dirty(CPUState *env)
1426 {
1427 int i;
1428 for(i = 0; i < CPU_TLB_SIZE; i++)
1429 tlb_update_dirty(&env->tlb_table[0][i]);
1430 for(i = 0; i < CPU_TLB_SIZE; i++)
1431 tlb_update_dirty(&env->tlb_table[1][i]);
1432 }
1433
1434 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1435 unsigned long start)
1436 {
1437 unsigned long addr;
1438 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1439 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1440 if (addr == start) {
1441 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1442 }
1443 }
1444 }
1445
1446 /* update the TLB corresponding to virtual page vaddr and phys addr
1447 addr so that it is no longer dirty */
1448 static inline void tlb_set_dirty(CPUState *env,
1449 unsigned long addr, target_ulong vaddr)
1450 {
1451 int i;
1452
1453 addr &= TARGET_PAGE_MASK;
1454 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1455 tlb_set_dirty1(&env->tlb_table[0][i], addr);
1456 tlb_set_dirty1(&env->tlb_table[1][i], addr);
1457 }
1458
1459 /* add a new TLB entry. At most one entry for a given virtual address
1460 is permitted. Return 0 if OK or 2 if the page could not be mapped
1461 (can only happen in non SOFTMMU mode for I/O pages or pages
1462 conflicting with the host address space). */
1463 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1464 target_phys_addr_t paddr, int prot,
1465 int is_user, int is_softmmu)
1466 {
1467 PhysPageDesc *p;
1468 unsigned long pd;
1469 unsigned int index;
1470 target_ulong address;
1471 target_phys_addr_t addend;
1472 int ret;
1473 CPUTLBEntry *te;
1474
1475 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1476 if (!p) {
1477 pd = IO_MEM_UNASSIGNED;
1478 } else {
1479 pd = p->phys_offset;
1480 }
1481 #if defined(DEBUG_TLB)
1482 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1483 vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1484 #endif
1485
1486 ret = 0;
1487 #if !defined(CONFIG_SOFTMMU)
1488 if (is_softmmu)
1489 #endif
1490 {
1491 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1492 /* IO memory case */
1493 address = vaddr | pd;
1494 addend = paddr;
1495 } else {
1496 /* standard memory */
1497 address = vaddr;
1498 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1499 }
1500
1501 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1502 addend -= vaddr;
1503 te = &env->tlb_table[is_user][index];
1504 te->addend = addend;
1505 if (prot & PAGE_READ) {
1506 te->addr_read = address;
1507 } else {
1508 te->addr_read = -1;
1509 }
1510 if (prot & PAGE_EXEC) {
1511 te->addr_code = address;
1512 } else {
1513 te->addr_code = -1;
1514 }
1515 if (prot & PAGE_WRITE) {
1516 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1517 (pd & IO_MEM_ROMD)) {
1518 /* write access calls the I/O callback */
1519 te->addr_write = vaddr |
1520 (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1521 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1522 !cpu_physical_memory_is_dirty(pd)) {
1523 te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1524 } else {
1525 te->addr_write = address;
1526 }
1527 } else {
1528 te->addr_write = -1;
1529 }
1530 }
1531 #if !defined(CONFIG_SOFTMMU)
1532 else {
1533 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1534 /* IO access: no mapping is done as it will be handled by the
1535 soft MMU */
1536 if (!(env->hflags & HF_SOFTMMU_MASK))
1537 ret = 2;
1538 } else {
1539 void *map_addr;
1540
1541 if (vaddr >= MMAP_AREA_END) {
1542 ret = 2;
1543 } else {
1544 if (prot & PROT_WRITE) {
1545 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1546 #if defined(TARGET_HAS_SMC) || 1
1547 first_tb ||
1548 #endif
1549 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1550 !cpu_physical_memory_is_dirty(pd))) {
1551 /* ROM: we behave as if code were inside */
1552 /* if code is present, we only map as read only and save the
1553 original mapping */
1554 VirtPageDesc *vp;
1555
1556 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1557 vp->phys_addr = pd;
1558 vp->prot = prot;
1559 vp->valid_tag = virt_valid_tag;
1560 prot &= ~PAGE_WRITE;
1561 }
1562 }
1563 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1564 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1565 if (map_addr == MAP_FAILED) {
1566 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1567 paddr, vaddr);
1568 }
1569 }
1570 }
1571 }
1572 #endif
1573 return ret;
1574 }
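/* Illustrative sketch, not part of the original file: a target MMU fault
   handler would typically walk the guest page tables and then install the
   resulting mapping with tlb_set_page_exec() above.  The helper, the
   protection value and the is_softmmu choice are hypothetical. */
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   target_phys_addr_t paddr, int is_user)
{
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* at most one TLB entry per virtual page; paddr selects RAM or an I/O
       slot through the phys_offset recorded by
       cpu_register_physical_memory() */
    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             is_user, 1 /* is_softmmu */);
}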
1575
1576 /* called from signal handler: invalidate the code and unprotect the
1577 page. Return TRUE if the fault was successfully handled. */
1578 int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1579 {
1580 #if !defined(CONFIG_SOFTMMU)
1581 VirtPageDesc *vp;
1582
1583 #if defined(DEBUG_TLB)
1584 printf("page_unprotect: addr=0x%08x\n", addr);
1585 #endif
1586 addr &= TARGET_PAGE_MASK;
1587
1588 /* if it is not mapped, no need to worry here */
1589 if (addr >= MMAP_AREA_END)
1590 return 0;
1591 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1592 if (!vp)
1593 return 0;
1594 /* NOTE: in this case, valid_tag is _not_ tested as it
1595 validates only the code TLB */
1596 if (vp->valid_tag != virt_valid_tag)
1597 return 0;
1598 if (!(vp->prot & PAGE_WRITE))
1599 return 0;
1600 #if defined(DEBUG_TLB)
1601 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1602 addr, vp->phys_addr, vp->prot);
1603 #endif
1604 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1605 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1606 (unsigned long)addr, vp->prot);
1607 /* set the dirty bit */
1608 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1609 /* flush the code inside */
1610 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1611 return 1;
1612 #else
1613 return 0;
1614 #endif
1615 }
1616
1617 #else
1618
1619 void tlb_flush(CPUState *env, int flush_global)
1620 {
1621 }
1622
1623 void tlb_flush_page(CPUState *env, target_ulong addr)
1624 {
1625 }
1626
1627 int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1628 target_phys_addr_t paddr, int prot,
1629 int is_user, int is_softmmu)
1630 {
1631 return 0;
1632 }
1633
1634 /* dump memory mappings */
1635 void page_dump(FILE *f)
1636 {
1637 unsigned long start, end;
1638 int i, j, prot, prot1;
1639 PageDesc *p;
1640
1641 fprintf(f, "%-8s %-8s %-8s %s\n",
1642 "start", "end", "size", "prot");
1643 start = -1;
1644 end = -1;
1645 prot = 0;
1646 for(i = 0; i <= L1_SIZE; i++) {
1647 if (i < L1_SIZE)
1648 p = l1_map[i];
1649 else
1650 p = NULL;
1651 for(j = 0;j < L2_SIZE; j++) {
1652 if (!p)
1653 prot1 = 0;
1654 else
1655 prot1 = p[j].flags;
1656 if (prot1 != prot) {
1657 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1658 if (start != -1) {
1659 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1660 start, end, end - start,
1661 prot & PAGE_READ ? 'r' : '-',
1662 prot & PAGE_WRITE ? 'w' : '-',
1663 prot & PAGE_EXEC ? 'x' : '-');
1664 }
1665 if (prot1 != 0)
1666 start = end;
1667 else
1668 start = -1;
1669 prot = prot1;
1670 }
1671 if (!p)
1672 break;
1673 }
1674 }
1675 }
1676
1677 int page_get_flags(target_ulong address)
1678 {
1679 PageDesc *p;
1680
1681 p = page_find(address >> TARGET_PAGE_BITS);
1682 if (!p)
1683 return 0;
1684 return p->flags;
1685 }
1686
1687 /* modify the flags of a page and invalidate the code if
1688 necessary. The flag PAGE_WRITE_ORG is positioned automatically
1689 depending on PAGE_WRITE */
1690 void page_set_flags(target_ulong start, target_ulong end, int flags)
1691 {
1692 PageDesc *p;
1693 target_ulong addr;
1694
1695 start = start & TARGET_PAGE_MASK;
1696 end = TARGET_PAGE_ALIGN(end);
1697 if (flags & PAGE_WRITE)
1698 flags |= PAGE_WRITE_ORG;
1699 spin_lock(&tb_lock);
1700 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1701 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1702 /* if the write protection is set, then we invalidate the code
1703 inside */
1704 if (!(p->flags & PAGE_WRITE) &&
1705 (flags & PAGE_WRITE) &&
1706 p->first_tb) {
1707 tb_invalidate_phys_page(addr, 0, NULL);
1708 }
1709 p->flags = flags;
1710 }
1711 spin_unlock(&tb_lock);
1712 }
1713
1714 /* called from signal handler: invalidate the code and unprotect the
1715 page. Return TRUE if the fault was successfully handled. */
1716 int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1717 {
1718 unsigned int page_index, prot, pindex;
1719 PageDesc *p, *p1;
1720 target_ulong host_start, host_end, addr;
1721
1722 host_start = address & qemu_host_page_mask;
1723 page_index = host_start >> TARGET_PAGE_BITS;
1724 p1 = page_find(page_index);
1725 if (!p1)
1726 return 0;
1727 host_end = host_start + qemu_host_page_size;
1728 p = p1;
1729 prot = 0;
1730 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1731 prot |= p->flags;
1732 p++;
1733 }
1734 /* if the page was really writable, then we change its
1735 protection back to writable */
1736 if (prot & PAGE_WRITE_ORG) {
1737 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1738 if (!(p1[pindex].flags & PAGE_WRITE)) {
1739 mprotect((void *)g2h(host_start), qemu_host_page_size,
1740 (prot & PAGE_BITS) | PAGE_WRITE);
1741 p1[pindex].flags |= PAGE_WRITE;
1742 /* and since the content will be modified, we must invalidate
1743 the corresponding translated code. */
1744 tb_invalidate_phys_page(address, pc, puc);
1745 #ifdef DEBUG_TB_CHECK
1746 tb_invalidate_check(address);
1747 #endif
1748 return 1;
1749 }
1750 }
1751 return 0;
1752 }
1753
1754 /* call this function when system calls directly modify a memory area */
1755 /* ??? This should be redundant now we have lock_user. */
1756 void page_unprotect_range(target_ulong data, target_ulong data_size)
1757 {
1758 target_ulong start, end, addr;
1759
1760 start = data;
1761 end = start + data_size;
1762 start &= TARGET_PAGE_MASK;
1763 end = TARGET_PAGE_ALIGN(end);
1764 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1765 page_unprotect(addr, 0, NULL);
1766 }
1767 }
1768
1769 static inline void tlb_set_dirty(CPUState *env,
1770 unsigned long addr, target_ulong vaddr)
1771 {
1772 }
1773 #endif /* defined(CONFIG_USER_ONLY) */
1774
1775 /* register physical memory. 'size' must be a multiple of the target
1776 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1777 io memory page */
1778 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1779 unsigned long size,
1780 unsigned long phys_offset)
1781 {
1782 target_phys_addr_t addr, end_addr;
1783 PhysPageDesc *p;
1784 CPUState *env;
1785
1786 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1787 end_addr = start_addr + size;
1788 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1789 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1790 p->phys_offset = phys_offset;
1791 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
1792 (phys_offset & IO_MEM_ROMD))
1793 phys_offset += TARGET_PAGE_SIZE;
1794 }
1795
1796 /* since each CPU stores ram addresses in its TLB cache, we must
1797 reset the modified entries */
1798 /* XXX: slow ! */
1799 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1800 tlb_flush(env, 1);
1801 }
1802 }
1803
1804 /* XXX: temporary until new memory mapping API */
1805 uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
1806 {
1807 PhysPageDesc *p;
1808
1809 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1810 if (!p)
1811 return IO_MEM_UNASSIGNED;
1812 return p->phys_offset;
1813 }
1814
1815 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1816 {
1817 #ifdef DEBUG_UNASSIGNED
1818 printf("Unassigned mem read 0x%08x\n", (int)addr);
1819 #endif
1820 return 0;
1821 }
1822
1823 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1824 {
1825 #ifdef DEBUG_UNASSIGNED
1826 printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
1827 #endif
1828 }
1829
1830 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1831 unassigned_mem_readb,
1832 unassigned_mem_readb,
1833 unassigned_mem_readb,
1834 };
1835
1836 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1837 unassigned_mem_writeb,
1838 unassigned_mem_writeb,
1839 unassigned_mem_writeb,
1840 };
1841
1842 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1843 {
1844 unsigned long ram_addr;
1845 int dirty_flags;
1846 ram_addr = addr - (unsigned long)phys_ram_base;
1847 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1848 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1849 #if !defined(CONFIG_USER_ONLY)
1850 tb_invalidate_phys_page_fast(ram_addr, 1);
1851 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1852 #endif
1853 }
1854 stb_p((uint8_t *)(long)addr, val);
1855 #ifdef USE_KQEMU
1856 if (cpu_single_env->kqemu_enabled &&
1857 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1858 kqemu_modify_page(cpu_single_env, ram_addr);
1859 #endif
1860 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1861 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1862 /* we remove the notdirty callback only if the code has been
1863 flushed */
1864 if (dirty_flags == 0xff)
1865 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1866 }
1867
1868 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1869 {
1870 unsigned long ram_addr;
1871 int dirty_flags;
1872 ram_addr = addr - (unsigned long)phys_ram_base;
1873 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1874 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1875 #if !defined(CONFIG_USER_ONLY)
1876 tb_invalidate_phys_page_fast(ram_addr, 2);
1877 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1878 #endif
1879 }
1880 stw_p((uint8_t *)(long)addr, val);
1881 #ifdef USE_KQEMU
1882 if (cpu_single_env->kqemu_enabled &&
1883 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1884 kqemu_modify_page(cpu_single_env, ram_addr);
1885 #endif
1886 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1887 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1888 /* we remove the notdirty callback only if the code has been
1889 flushed */
1890 if (dirty_flags == 0xff)
1891 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1892 }
1893
1894 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1895 {
1896 unsigned long ram_addr;
1897 int dirty_flags;
1898 ram_addr = addr - (unsigned long)phys_ram_base;
1899 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1900 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1901 #if !defined(CONFIG_USER_ONLY)
1902 tb_invalidate_phys_page_fast(ram_addr, 4);
1903 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1904 #endif
1905 }
1906 stl_p((uint8_t *)(long)addr, val);
1907 #ifdef USE_KQEMU
1908 if (cpu_single_env->kqemu_enabled &&
1909 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
1910 kqemu_modify_page(cpu_single_env, ram_addr);
1911 #endif
1912 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1913 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1914 /* we remove the notdirty callback only if the code has been
1915 flushed */
1916 if (dirty_flags == 0xff)
1917 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
1918 }
1919
1920 static CPUReadMemoryFunc *error_mem_read[3] = {
1921 NULL, /* never used */
1922 NULL, /* never used */
1923 NULL, /* never used */
1924 };
1925
1926 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1927 notdirty_mem_writeb,
1928 notdirty_mem_writew,
1929 notdirty_mem_writel,
1930 };
1931
1932 static void io_mem_init(void)
1933 {
1934 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1935 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1936 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1937 io_mem_nb = 5;
1938
1939 /* alloc dirty bits array */
1940 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1941 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1942 }
1943
1944 /* mem_read and mem_write are arrays of functions containing the
1945 function to access byte (index 0), word (index 1) and dword (index
1946 2). All functions must be supplied. If io_index is non zero, the
1947 corresponding io zone is modified. If it is zero, a new io zone is
1948 allocated. The return value can be used with
1949 cpu_register_physical_memory(). (-1) is returned on error. */
1950 int cpu_register_io_memory(int io_index,
1951 CPUReadMemoryFunc **mem_read,
1952 CPUWriteMemoryFunc **mem_write,
1953 void *opaque)
1954 {
1955 int i;
1956
1957 if (io_index <= 0) {
1958 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1959 return -1;
1960 io_index = io_mem_nb++;
1961 } else {
1962 if (io_index >= IO_MEM_NB_ENTRIES)
1963 return -1;
1964 }
1965
1966 for(i = 0; i < 3; i++) {
1967 io_mem_read[io_index][i] = mem_read[i];
1968 io_mem_write[io_index][i] = mem_write[i];
1969 }
1970 io_mem_opaque[io_index] = opaque;
1971 return io_index << IO_MEM_SHIFT;
1972 }
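
/* Usage sketch (illustrative only, kept under #if 0): a device model could
   register a freshly allocated I/O zone and map it at a physical address.
   The mydev_* names, the base address and the region size are hypothetical. */
#if 0
static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    /* return a device register value selected by the offset */
    return 0;
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update device state for the register at this offset */
}

static CPUReadMemoryFunc *mydev_read_fn[3] = {
    mydev_read, mydev_read, mydev_read,      /* byte, word, dword */
};
static CPUWriteMemoryFunc *mydev_write_fn[3] = {
    mydev_write, mydev_write, mydev_write,
};

static void mydev_init(target_phys_addr_t base)
{
    int io;
    /* io_index == 0: allocate a new I/O zone */
    io = cpu_register_io_memory(0, mydev_read_fn, mydev_write_fn, NULL);
    if (io >= 0)
        cpu_register_physical_memory(base, 0x1000, io);
}
#endif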
1973
1974 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1975 {
1976 return io_mem_write[io_index >> IO_MEM_SHIFT];
1977 }
1978
1979 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1980 {
1981 return io_mem_read[io_index >> IO_MEM_SHIFT];
1982 }
1983
1984 /* physical memory access (slow version, mainly for debug) */
1985 #if defined(CONFIG_USER_ONLY)
1986 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1987 int len, int is_write)
1988 {
1989 int l, flags;
1990 target_ulong page;
1991 void * p;
1992
1993 while (len > 0) {
1994 page = addr & TARGET_PAGE_MASK;
1995 l = (page + TARGET_PAGE_SIZE) - addr;
1996 if (l > len)
1997 l = len;
1998 flags = page_get_flags(page);
1999 if (!(flags & PAGE_VALID))
2000 return;
2001 if (is_write) {
2002 if (!(flags & PAGE_WRITE))
2003 return;
2004 p = lock_user(addr, l, 0);
2005 memcpy(p, buf, l);
2006 unlock_user(p, addr, l);
2007 } else {
2008 if (!(flags & PAGE_READ))
2009 return;
2010 p = lock_user(addr, l, 1);
2011 memcpy(buf, p, l);
2012 unlock_user(p, addr, 0);
2013 }
2014 len -= l;
2015 buf += l;
2016 addr += l;
2017 }
2018 }
2019
2020 #else
2021 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2022 int len, int is_write)
2023 {
2024 int l, io_index;
2025 uint8_t *ptr;
2026 uint32_t val;
2027 target_phys_addr_t page;
2028 unsigned long pd;
2029 PhysPageDesc *p;
2030
2031 while (len > 0) {
2032 page = addr & TARGET_PAGE_MASK;
2033 l = (page + TARGET_PAGE_SIZE) - addr;
2034 if (l > len)
2035 l = len;
2036 p = phys_page_find(page >> TARGET_PAGE_BITS);
2037 if (!p) {
2038 pd = IO_MEM_UNASSIGNED;
2039 } else {
2040 pd = p->phys_offset;
2041 }
2042
2043 if (is_write) {
2044 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2045 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2046 /* XXX: could force cpu_single_env to NULL to avoid
2047 potential bugs */
2048 if (l >= 4 && ((addr & 3) == 0)) {
2049 /* 32 bit write access */
2050 val = ldl_p(buf);
2051 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2052 l = 4;
2053 } else if (l >= 2 && ((addr & 1) == 0)) {
2054 /* 16 bit write access */
2055 val = lduw_p(buf);
2056 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2057 l = 2;
2058 } else {
2059 /* 8 bit write access */
2060 val = ldub_p(buf);
2061 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2062 l = 1;
2063 }
2064 } else {
2065 unsigned long addr1;
2066 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2067 /* RAM case */
2068 ptr = phys_ram_base + addr1;
2069 memcpy(ptr, buf, l);
2070 if (!cpu_physical_memory_is_dirty(addr1)) {
2071 /* invalidate code */
2072 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2073 /* set dirty bit */
2074 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2075 (0xff & ~CODE_DIRTY_FLAG);
2076 }
2077 }
2078 } else {
2079 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2080 !(pd & IO_MEM_ROMD)) {
2081 /* I/O case */
2082 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2083 if (l >= 4 && ((addr & 3) == 0)) {
2084 /* 32 bit read access */
2085 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2086 stl_p(buf, val);
2087 l = 4;
2088 } else if (l >= 2 && ((addr & 1) == 0)) {
2089 /* 16 bit read access */
2090 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2091 stw_p(buf, val);
2092 l = 2;
2093 } else {
2094 /* 8 bit read access */
2095 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2096 stb_p(buf, val);
2097 l = 1;
2098 }
2099 } else {
2100 /* RAM case */
2101 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2102 (addr & ~TARGET_PAGE_MASK);
2103 memcpy(buf, ptr, l);
2104 }
2105 }
2106 len -= l;
2107 buf += l;
2108 addr += l;
2109 }
2110 }
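
/* Usage sketch (illustrative only, kept under #if 0): a device performing
   DMA-style transfers can go through cpu_physical_memory_rw(); the guest
   address and buffer size used here are hypothetical. */
#if 0
static void dma_copy_example(target_phys_addr_t guest_addr)
{
    uint8_t buf[64];

    memset(buf, 0, sizeof(buf));
    /* is_write = 1: copy buf into guest physical memory */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 1);
    /* is_write = 0: read the same bytes back */
    cpu_physical_memory_rw(guest_addr, buf, sizeof(buf), 0);
}
#endif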
2111
2112 /* used for ROM loading: can write to RAM and ROM */
2113 void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2114 const uint8_t *buf, int len)
2115 {
2116 int l;
2117 uint8_t *ptr;
2118 target_phys_addr_t page;
2119 unsigned long pd;
2120 PhysPageDesc *p;
2121
2122 while (len > 0) {
2123 page = addr & TARGET_PAGE_MASK;
2124 l = (page + TARGET_PAGE_SIZE) - addr;
2125 if (l > len)
2126 l = len;
2127 p = phys_page_find(page >> TARGET_PAGE_BITS);
2128 if (!p) {
2129 pd = IO_MEM_UNASSIGNED;
2130 } else {
2131 pd = p->phys_offset;
2132 }
2133
2134 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2135 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2136 !(pd & IO_MEM_ROMD)) {
2137 /* do nothing */
2138 } else {
2139 unsigned long addr1;
2140 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2141 /* ROM/RAM case */
2142 ptr = phys_ram_base + addr1;
2143 memcpy(ptr, buf, l);
2144 }
2145 len -= l;
2146 buf += l;
2147 addr += l;
2148 }
2149 }
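
/* Usage sketch (illustrative only, kept under #if 0): board initialization
   code can copy a firmware image into a region registered as ROM; the
   function name and parameters are hypothetical. */
#if 0
static void load_firmware_image(const uint8_t *image, int size,
                                target_phys_addr_t rom_base)
{
    cpu_physical_memory_write_rom(rom_base, image, size);
}
#endif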
2150
2151
2152 /* warning: addr must be aligned */
2153 uint32_t ldl_phys(target_phys_addr_t addr)
2154 {
2155 int io_index;
2156 uint8_t *ptr;
2157 uint32_t val;
2158 unsigned long pd;
2159 PhysPageDesc *p;
2160
2161 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2162 if (!p) {
2163 pd = IO_MEM_UNASSIGNED;
2164 } else {
2165 pd = p->phys_offset;
2166 }
2167
2168 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2169 !(pd & IO_MEM_ROMD)) {
2170 /* I/O case */
2171 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2172 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2173 } else {
2174 /* RAM case */
2175 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2176 (addr & ~TARGET_PAGE_MASK);
2177 val = ldl_p(ptr);
2178 }
2179 return val;
2180 }
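
/* Usage sketch (illustrative only, kept under #if 0): the ld*_phys helpers
   expect naturally aligned physical addresses, e.g. when reading a 32-bit
   field of an in-RAM descriptor; the layout and offset are hypothetical. */
#if 0
static uint32_t read_desc_word(target_phys_addr_t desc_base)
{
    /* offset 8 is assumed to keep the field 4-byte aligned */
    return ldl_phys(desc_base + 8);
}
#endif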
2181
2182 /* warning: addr must be aligned */
2183 uint64_t ldq_phys(target_phys_addr_t addr)
2184 {
2185 int io_index;
2186 uint8_t *ptr;
2187 uint64_t val;
2188 unsigned long pd;
2189 PhysPageDesc *p;
2190
2191 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2192 if (!p) {
2193 pd = IO_MEM_UNASSIGNED;
2194 } else {
2195 pd = p->phys_offset;
2196 }
2197
2198 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2199 !(pd & IO_MEM_ROMD)) {
2200 /* I/O case */
2201 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2202 #ifdef TARGET_WORDS_BIGENDIAN
2203 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2204 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2205 #else
2206 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2207 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2208 #endif
2209 } else {
2210 /* RAM case */
2211 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2212 (addr & ~TARGET_PAGE_MASK);
2213 val = ldq_p(ptr);
2214 }
2215 return val;
2216 }
2217
2218 /* XXX: optimize */
2219 uint32_t ldub_phys(target_phys_addr_t addr)
2220 {
2221 uint8_t val;
2222 cpu_physical_memory_read(addr, &val, 1);
2223 return val;
2224 }
2225
2226 /* XXX: optimize */
2227 uint32_t lduw_phys(target_phys_addr_t addr)
2228 {
2229 uint16_t val;
2230 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2231 return tswap16(val);
2232 }
2233
2234 /* warning: addr must be aligned. The RAM page is not marked as dirty
2235 and the code inside it is not invalidated. This is useful when the
2236 dirty bits are used to track modified PTEs. */
2237 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2238 {
2239 int io_index;
2240 uint8_t *ptr;
2241 unsigned long pd;
2242 PhysPageDesc *p;
2243
2244 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2245 if (!p) {
2246 pd = IO_MEM_UNASSIGNED;
2247 } else {
2248 pd = p->phys_offset;
2249 }
2250
2251 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2252 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2253 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2254 } else {
2255 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2256 (addr & ~TARGET_PAGE_MASK);
2257 stl_p(ptr, val);
2258 }
2259 }
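
/* Usage sketch (illustrative only, kept under #if 0): a target MMU helper
   can set accessed/dirty bits in a guest PTE with stl_phys_notdirty() so
   that the page holding the page table is not itself flagged as dirty;
   the bit value used here is hypothetical. */
#if 0
static void pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                    /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);
}
#endif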
2260
2261 /* warning: addr must be aligned */
2262 void stl_phys(target_phys_addr_t addr, uint32_t val)
2263 {
2264 int io_index;
2265 uint8_t *ptr;
2266 unsigned long pd;
2267 PhysPageDesc *p;
2268
2269 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2270 if (!p) {
2271 pd = IO_MEM_UNASSIGNED;
2272 } else {
2273 pd = p->phys_offset;
2274 }
2275
2276 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2277 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2278 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2279 } else {
2280 unsigned long addr1;
2281 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2282 /* RAM case */
2283 ptr = phys_ram_base + addr1;
2284 stl_p(ptr, val);
2285 if (!cpu_physical_memory_is_dirty(addr1)) {
2286 /* invalidate code */
2287 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2288 /* set dirty bit */
2289 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2290 (0xff & ~CODE_DIRTY_FLAG);
2291 }
2292 }
2293 }
2294
2295 /* XXX: optimize */
2296 void stb_phys(target_phys_addr_t addr, uint32_t val)
2297 {
2298 uint8_t v = val;
2299 cpu_physical_memory_write(addr, &v, 1);
2300 }
2301
2302 /* XXX: optimize */
2303 void stw_phys(target_phys_addr_t addr, uint32_t val)
2304 {
2305 uint16_t v = tswap16(val);
2306 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2307 }
2308
2309 /* XXX: optimize */
2310 void stq_phys(target_phys_addr_t addr, uint64_t val)
2311 {
2312 val = tswap64(val);
2313 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2314 }
2315
2316 #endif
2317
2318 /* virtual memory access for debug */
2319 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2320 uint8_t *buf, int len, int is_write)
2321 {
2322 int l;
2323 target_ulong page, phys_addr;
2324
2325 while (len > 0) {
2326 page = addr & TARGET_PAGE_MASK;
2327 phys_addr = cpu_get_phys_page_debug(env, page);
2328 /* if no physical page mapped, return an error */
2329 if (phys_addr == -1)
2330 return -1;
2331 l = (page + TARGET_PAGE_SIZE) - addr;
2332 if (l > len)
2333 l = len;
2334 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2335 buf, l, is_write);
2336 len -= l;
2337 buf += l;
2338 addr += l;
2339 }
2340 return 0;
2341 }
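
/* Usage sketch (illustrative only, kept under #if 0): a debugger front end
   (for example a gdb stub) can read guest virtual memory through
   cpu_memory_rw_debug(); the helper name is hypothetical. */
#if 0
static int debug_read_word(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0) < 0)
        return -1;                  /* no physical page mapped */
    return 0;
}
#endif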
2342
2343 void dump_exec_info(FILE *f,
2344 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2345 {
2346 int i, target_code_size, max_target_code_size;
2347 int direct_jmp_count, direct_jmp2_count, cross_page;
2348 TranslationBlock *tb;
2349
2350 target_code_size = 0;
2351 max_target_code_size = 0;
2352 cross_page = 0;
2353 direct_jmp_count = 0;
2354 direct_jmp2_count = 0;
2355 for(i = 0; i < nb_tbs; i++) {
2356 tb = &tbs[i];
2357 target_code_size += tb->size;
2358 if (tb->size > max_target_code_size)
2359 max_target_code_size = tb->size;
2360 if (tb->page_addr[1] != -1)
2361 cross_page++;
2362 if (tb->tb_next_offset[0] != 0xffff) {
2363 direct_jmp_count++;
2364 if (tb->tb_next_offset[1] != 0xffff) {
2365 direct_jmp2_count++;
2366 }
2367 }
2368 }
2369 /* XXX: avoid using doubles? */
2370 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2371 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2372 nb_tbs ? target_code_size / nb_tbs : 0,
2373 max_target_code_size);
2374 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2375 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2376 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2377 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2378 cross_page,
2379 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2380 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2381 direct_jmp_count,
2382 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2383 direct_jmp2_count,
2384 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2385 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2386 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2387 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2388 }
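
/* Usage sketch (illustrative only, kept under #if 0): dump_exec_info() takes
   any fprintf-compatible callback, so it can simply be pointed at stderr. */
#if 0
static void dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif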
2389
2390 #if !defined(CONFIG_USER_ONLY)
2391
2392 #define MMUSUFFIX _cmmu
2393 #define GETPC() NULL
2394 #define env cpu_single_env
2395 #define SOFTMMU_CODE_ACCESS
2396
2397 #define SHIFT 0
2398 #include "softmmu_template.h"
2399
2400 #define SHIFT 1
2401 #include "softmmu_template.h"
2402
2403 #define SHIFT 2
2404 #include "softmmu_template.h"
2405
2406 #define SHIFT 3
2407 #include "softmmu_template.h"
2408
2409 #undef env
2410
2411 #endif