1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37
38 //#define DEBUG_TB_INVALIDATE
39 //#define DEBUG_FLUSH
40 //#define DEBUG_TLB
41
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
45
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48
49 #define SMC_BITMAP_USE_THRESHOLD 10
50
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
53
54 #if defined(TARGET_SPARC64)
55 #define TARGET_PHYS_ADDR_SPACE_BITS 41
56 #elif defined(TARGET_PPC64)
57 #define TARGET_PHYS_ADDR_SPACE_BITS 42
58 #else
59 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60 #define TARGET_PHYS_ADDR_SPACE_BITS 32
61 #endif
62
63 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
64 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
65 int nb_tbs;
66 /* any access to the tbs or the page table must use this lock */
67 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
68
69 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
70 uint8_t *code_gen_ptr;
71
72 int phys_ram_size;
73 int phys_ram_fd;
74 uint8_t *phys_ram_base;
75 uint8_t *phys_ram_dirty;
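/* phys_ram_dirty holds one byte of dirty flags per target page: 0xff means
   the page is fully dirty; while CODE_DIRTY_FLAG is clear, writes to the
   page are trapped so translated code derived from it can be invalidated
   (see the notdirty handlers below) */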
76
77 typedef struct PageDesc {
78 /* list of TBs intersecting this ram page */
79 TranslationBlock *first_tb;
80 /* in order to optimize self-modifying code handling, we count the number
81 of write accesses to a given page; above a threshold a code bitmap is used */
82 unsigned int code_write_count;
83 uint8_t *code_bitmap;
84 #if defined(CONFIG_USER_ONLY)
85 unsigned long flags;
86 #endif
87 } PageDesc;
88
89 typedef struct PhysPageDesc {
90 /* offset in host memory of the page + io_index in the low 12 bits */
91 uint32_t phys_offset;
92 } PhysPageDesc;
93
94 #define L2_BITS 10
95 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
96
97 #define L1_SIZE (1 << L1_BITS)
98 #define L2_SIZE (1 << L2_BITS)
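
/* l1_map is a two-level table indexed by target page number: the top
   L1_BITS select an L1 entry, the next L2_BITS select a PageDesc inside
   the L2 block, so L1_BITS + L2_BITS + TARGET_PAGE_BITS == 32 */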
99
100 static void io_mem_init(void);
101
102 unsigned long qemu_real_host_page_size;
103 unsigned long qemu_host_page_bits;
104 unsigned long qemu_host_page_size;
105 unsigned long qemu_host_page_mask;
106
107 /* XXX: for system emulation, it could just be an array */
108 static PageDesc *l1_map[L1_SIZE];
109 PhysPageDesc **l1_phys_map;
110
111 /* io memory support */
112 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
113 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
114 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
115 static int io_mem_nb;
116
117 /* log support */
118 char *logfilename = "/tmp/qemu.log";
119 FILE *logfile;
120 int loglevel;
121
122 /* statistics */
123 static int tlb_flush_count;
124 static int tb_flush_count;
125 static int tb_phys_invalidate_count;
126
127 static void page_init(void)
128 {
129 /* NOTE: we can always suppose that qemu_host_page_size >=
130 TARGET_PAGE_SIZE */
131 #ifdef _WIN32
132 {
133 SYSTEM_INFO system_info;
134 DWORD old_protect;
135
136 GetSystemInfo(&system_info);
137 qemu_real_host_page_size = system_info.dwPageSize;
138
139 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
140 PAGE_EXECUTE_READWRITE, &old_protect);
141 }
142 #else
143 qemu_real_host_page_size = getpagesize();
144 {
145 unsigned long start, end;
146
147 start = (unsigned long)code_gen_buffer;
148 start &= ~(qemu_real_host_page_size - 1);
149
150 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
151 end += qemu_real_host_page_size - 1;
152 end &= ~(qemu_real_host_page_size - 1);
153
154 mprotect((void *)start, end - start,
155 PROT_READ | PROT_WRITE | PROT_EXEC);
156 }
157 #endif
158
159 if (qemu_host_page_size == 0)
160 qemu_host_page_size = qemu_real_host_page_size;
161 if (qemu_host_page_size < TARGET_PAGE_SIZE)
162 qemu_host_page_size = TARGET_PAGE_SIZE;
163 qemu_host_page_bits = 0;
164 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
165 qemu_host_page_bits++;
166 qemu_host_page_mask = ~(qemu_host_page_size - 1);
167 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
168 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
169 }
170
171 static inline PageDesc *page_find_alloc(unsigned int index)
172 {
173 PageDesc **lp, *p;
174
175 lp = &l1_map[index >> L2_BITS];
176 p = *lp;
177 if (!p) {
178 /* allocate if not found */
179 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
180 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
181 *lp = p;
182 }
183 return p + (index & (L2_SIZE - 1));
184 }
185
186 static inline PageDesc *page_find(unsigned int index)
187 {
188 PageDesc *p;
189
190 p = l1_map[index >> L2_BITS];
191 if (!p)
192 return 0;
193 return p + (index & (L2_SIZE - 1));
194 }
195
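/* look up (and optionally allocate) the PhysPageDesc for a physical page
   index; when TARGET_PHYS_ADDR_SPACE_BITS > 32, an extra indirection level
   is inserted above the L1/L2 tables to cover the larger address space */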
196 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
197 {
198 void **lp, **p;
199
200 p = (void **)l1_phys_map;
201 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
202
203 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
204 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
205 #endif
206 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
207 p = *lp;
208 if (!p) {
209 /* allocate if not found */
210 if (!alloc)
211 return NULL;
212 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
213 memset(p, 0, sizeof(void *) * L1_SIZE);
214 *lp = p;
215 }
216 #endif
217 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
218 p = *lp;
219 if (!p) {
220 /* allocate if not found */
221 if (!alloc)
222 return NULL;
223 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
224 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
225 *lp = p;
226 }
227 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
228 }
229
230 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
231 {
232 return phys_page_find_alloc(index, 0);
233 }
234
235 #if !defined(CONFIG_USER_ONLY)
236 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
237 target_ulong vaddr);
238 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
239 target_ulong vaddr);
240 #endif
241
242 void cpu_exec_init(void)
243 {
244 if (!code_gen_ptr) {
245 code_gen_ptr = code_gen_buffer;
246 page_init();
247 io_mem_init();
248 }
249 }
250
251 static inline void invalidate_page_bitmap(PageDesc *p)
252 {
253 if (p->code_bitmap) {
254 qemu_free(p->code_bitmap);
255 p->code_bitmap = NULL;
256 }
257 p->code_write_count = 0;
258 }
259
260 /* set to NULL all the 'first_tb' fields in all PageDescs */
261 static void page_flush_tb(void)
262 {
263 int i, j;
264 PageDesc *p;
265
266 for(i = 0; i < L1_SIZE; i++) {
267 p = l1_map[i];
268 if (p) {
269 for(j = 0; j < L2_SIZE; j++) {
270 p->first_tb = NULL;
271 invalidate_page_bitmap(p);
272 p++;
273 }
274 }
275 }
276 }
277
278 /* flush all the translation blocks */
279 /* XXX: tb_flush is currently not thread safe */
280 void tb_flush(CPUState *env)
281 {
282 #if defined(DEBUG_FLUSH)
283 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
284 code_gen_ptr - code_gen_buffer,
285 nb_tbs,
286 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
287 #endif
288 nb_tbs = 0;
289 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
290
291 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
292 page_flush_tb();
293
294 code_gen_ptr = code_gen_buffer;
295 /* XXX: flush processor icache at this point if cache flush is
296 expensive */
297 tb_flush_count++;
298 }
299
300 #ifdef DEBUG_TB_CHECK
301
302 static void tb_invalidate_check(unsigned long address)
303 {
304 TranslationBlock *tb;
305 int i;
306 address &= TARGET_PAGE_MASK;
307 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
308 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
309 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
310 address >= tb->pc + tb->size)) {
311 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
312 address, tb->pc, tb->size);
313 }
314 }
315 }
316 }
317
318 /* verify that all the pages have correct rights for code */
319 static void tb_page_check(void)
320 {
321 TranslationBlock *tb;
322 int i, flags1, flags2;
323
324 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
325 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
326 flags1 = page_get_flags(tb->pc);
327 flags2 = page_get_flags(tb->pc + tb->size - 1);
328 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
329 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
330 tb->pc, tb->size, flags1, flags2);
331 }
332 }
333 }
334 }
335
336 void tb_jmp_check(TranslationBlock *tb)
337 {
338 TranslationBlock *tb1;
339 unsigned int n1;
340
341 /* suppress any remaining jumps to this TB */
342 tb1 = tb->jmp_first;
343 for(;;) {
344 n1 = (long)tb1 & 3;
345 tb1 = (TranslationBlock *)((long)tb1 & ~3);
346 if (n1 == 2)
347 break;
348 tb1 = tb1->jmp_next[n1];
349 }
350 /* check end of list */
351 if (tb1 != tb) {
352 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
353 }
354 }
355
356 #endif
357
358 /* invalidate one TB */
359 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
360 int next_offset)
361 {
362 TranslationBlock *tb1;
363 for(;;) {
364 tb1 = *ptb;
365 if (tb1 == tb) {
366 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
367 break;
368 }
369 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
370 }
371 }
372
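/* TB list pointers stored in page_next[]/jmp_first/jmp_next[] carry an
   index in their low 2 bits: 0 or 1 selects which of the TB's two pages
   or jump slots is meant, and 2 marks the end of a circular list */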
373 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
374 {
375 TranslationBlock *tb1;
376 unsigned int n1;
377
378 for(;;) {
379 tb1 = *ptb;
380 n1 = (long)tb1 & 3;
381 tb1 = (TranslationBlock *)((long)tb1 & ~3);
382 if (tb1 == tb) {
383 *ptb = tb1->page_next[n1];
384 break;
385 }
386 ptb = &tb1->page_next[n1];
387 }
388 }
389
390 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
391 {
392 TranslationBlock *tb1, **ptb;
393 unsigned int n1;
394
395 ptb = &tb->jmp_next[n];
396 tb1 = *ptb;
397 if (tb1) {
398 /* find tb(n) in circular list */
399 for(;;) {
400 tb1 = *ptb;
401 n1 = (long)tb1 & 3;
402 tb1 = (TranslationBlock *)((long)tb1 & ~3);
403 if (n1 == n && tb1 == tb)
404 break;
405 if (n1 == 2) {
406 ptb = &tb1->jmp_first;
407 } else {
408 ptb = &tb1->jmp_next[n1];
409 }
410 }
411 /* now we can suppress tb(n) from the list */
412 *ptb = tb->jmp_next[n];
413
414 tb->jmp_next[n] = NULL;
415 }
416 }
417
418 /* reset the jump entry 'n' of a TB so that it is not chained to
419 another TB */
420 static inline void tb_reset_jump(TranslationBlock *tb, int n)
421 {
422 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
423 }
424
425 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
426 {
427 PageDesc *p;
428 unsigned int h, n1;
429 target_ulong phys_pc;
430 TranslationBlock *tb1, *tb2;
431
432 /* remove the TB from the hash list */
433 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
434 h = tb_phys_hash_func(phys_pc);
435 tb_remove(&tb_phys_hash[h], tb,
436 offsetof(TranslationBlock, phys_hash_next));
437
438 /* remove the TB from the page list */
439 if (tb->page_addr[0] != page_addr) {
440 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
441 tb_page_remove(&p->first_tb, tb);
442 invalidate_page_bitmap(p);
443 }
444 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
445 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
446 tb_page_remove(&p->first_tb, tb);
447 invalidate_page_bitmap(p);
448 }
449
450 tb_invalidated_flag = 1;
451
452 /* remove the TB from the hash list */
453 h = tb_jmp_cache_hash_func(tb->pc);
454 cpu_single_env->tb_jmp_cache[h] = NULL;
455
456 /* suppress this TB from the two jump lists */
457 tb_jmp_remove(tb, 0);
458 tb_jmp_remove(tb, 1);
459
460 /* suppress any remaining jumps to this TB */
461 tb1 = tb->jmp_first;
462 for(;;) {
463 n1 = (long)tb1 & 3;
464 if (n1 == 2)
465 break;
466 tb1 = (TranslationBlock *)((long)tb1 & ~3);
467 tb2 = tb1->jmp_next[n1];
468 tb_reset_jump(tb1, n1);
469 tb1->jmp_next[n1] = NULL;
470 tb1 = tb2;
471 }
472 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
473
474 tb_phys_invalidate_count++;
475 }
476
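/* set bits [start, start + len) in the byte-oriented bitmap 'tab' */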
477 static inline void set_bits(uint8_t *tab, int start, int len)
478 {
479 int end, mask, end1;
480
481 end = start + len;
482 tab += start >> 3;
483 mask = 0xff << (start & 7);
484 if ((start & ~7) == (end & ~7)) {
485 if (start < end) {
486 mask &= ~(0xff << (end & 7));
487 *tab |= mask;
488 }
489 } else {
490 *tab++ |= mask;
491 start = (start + 8) & ~7;
492 end1 = end & ~7;
493 while (start < end1) {
494 *tab++ = 0xff;
495 start += 8;
496 }
497 if (start < end) {
498 mask = ~(0xff << (end & 7));
499 *tab |= mask;
500 }
501 }
502 }
503
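/* build a bitmap of the bytes of this page that are covered by translated
   code, so that tb_invalidate_phys_page_fast() can skip writes which do
   not touch any TB */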
504 static void build_page_bitmap(PageDesc *p)
505 {
506 int n, tb_start, tb_end;
507 TranslationBlock *tb;
508
509 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
510 if (!p->code_bitmap)
511 return;
512 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
513
514 tb = p->first_tb;
515 while (tb != NULL) {
516 n = (long)tb & 3;
517 tb = (TranslationBlock *)((long)tb & ~3);
518 /* NOTE: this is subtle as a TB may span two physical pages */
519 if (n == 0) {
520 /* NOTE: tb_end may be after the end of the page, but
521 it is not a problem */
522 tb_start = tb->pc & ~TARGET_PAGE_MASK;
523 tb_end = tb_start + tb->size;
524 if (tb_end > TARGET_PAGE_SIZE)
525 tb_end = TARGET_PAGE_SIZE;
526 } else {
527 tb_start = 0;
528 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
529 }
530 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
531 tb = tb->page_next[n];
532 }
533 }
534
535 #ifdef TARGET_HAS_PRECISE_SMC
536
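/* translate the code at pc/cs_base/flags into a fresh TB and link it into
   the physical page tables; used below by the precise SMC handling to
   regenerate the current instruction as a CF_SINGLE_INSN TB */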
537 static void tb_gen_code(CPUState *env,
538 target_ulong pc, target_ulong cs_base, int flags,
539 int cflags)
540 {
541 TranslationBlock *tb;
542 uint8_t *tc_ptr;
543 target_ulong phys_pc, phys_page2, virt_page2;
544 int code_gen_size;
545
546 phys_pc = get_phys_addr_code(env, pc);
547 tb = tb_alloc(pc);
548 if (!tb) {
549 /* flush must be done */
550 tb_flush(env);
551 /* cannot fail at this point */
552 tb = tb_alloc(pc);
553 }
554 tc_ptr = code_gen_ptr;
555 tb->tc_ptr = tc_ptr;
556 tb->cs_base = cs_base;
557 tb->flags = flags;
558 tb->cflags = cflags;
559 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
560 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
561
562 /* check next page if needed */
563 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
564 phys_page2 = -1;
565 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
566 phys_page2 = get_phys_addr_code(env, virt_page2);
567 }
568 tb_link_phys(tb, phys_pc, phys_page2);
569 }
570 #endif
571
572 /* invalidate all TBs which intersect with the target physical page
573 starting in range [start, end). NOTE: start and end must refer to
574 the same physical page. 'is_cpu_write_access' should be true if called
575 from a real cpu write access: the virtual CPU will exit the current
576 TB if code is modified inside this TB. */
577 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
578 int is_cpu_write_access)
579 {
580 int n, current_tb_modified, current_tb_not_found, current_flags;
581 CPUState *env = cpu_single_env;
582 PageDesc *p;
583 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
584 target_ulong tb_start, tb_end;
585 target_ulong current_pc, current_cs_base;
586
587 p = page_find(start >> TARGET_PAGE_BITS);
588 if (!p)
589 return;
590 if (!p->code_bitmap &&
591 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
592 is_cpu_write_access) {
593 /* build code bitmap */
594 build_page_bitmap(p);
595 }
596
597 /* we remove all the TBs in the range [start, end) */
598 /* XXX: see if in some cases it could be faster to invalidate all the code */
599 current_tb_not_found = is_cpu_write_access;
600 current_tb_modified = 0;
601 current_tb = NULL; /* avoid warning */
602 current_pc = 0; /* avoid warning */
603 current_cs_base = 0; /* avoid warning */
604 current_flags = 0; /* avoid warning */
605 tb = p->first_tb;
606 while (tb != NULL) {
607 n = (long)tb & 3;
608 tb = (TranslationBlock *)((long)tb & ~3);
609 tb_next = tb->page_next[n];
610 /* NOTE: this is subtle as a TB may span two physical pages */
611 if (n == 0) {
612 /* NOTE: tb_end may be after the end of the page, but
613 it is not a problem */
614 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
615 tb_end = tb_start + tb->size;
616 } else {
617 tb_start = tb->page_addr[1];
618 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
619 }
620 if (!(tb_end <= start || tb_start >= end)) {
621 #ifdef TARGET_HAS_PRECISE_SMC
622 if (current_tb_not_found) {
623 current_tb_not_found = 0;
624 current_tb = NULL;
625 if (env->mem_write_pc) {
626 /* now we have a real cpu fault */
627 current_tb = tb_find_pc(env->mem_write_pc);
628 }
629 }
630 if (current_tb == tb &&
631 !(current_tb->cflags & CF_SINGLE_INSN)) {
632 /* If we are modifying the current TB, we must stop
633 its execution. We could be more precise by checking
634 that the modification is after the current PC, but it
635 would require a specialized function to partially
636 restore the CPU state */
637
638 current_tb_modified = 1;
639 cpu_restore_state(current_tb, env,
640 env->mem_write_pc, NULL);
641 #if defined(TARGET_I386)
642 current_flags = env->hflags;
643 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
644 current_cs_base = (target_ulong)env->segs[R_CS].base;
645 current_pc = current_cs_base + env->eip;
646 #else
647 #error unsupported CPU
648 #endif
649 }
650 #endif /* TARGET_HAS_PRECISE_SMC */
651 saved_tb = env->current_tb;
652 env->current_tb = NULL;
653 tb_phys_invalidate(tb, -1);
654 env->current_tb = saved_tb;
655 if (env->interrupt_request && env->current_tb)
656 cpu_interrupt(env, env->interrupt_request);
657 }
658 tb = tb_next;
659 }
660 #if !defined(CONFIG_USER_ONLY)
661 /* if no code remains, there is no need to keep using slow writes */
662 if (!p->first_tb) {
663 invalidate_page_bitmap(p);
664 if (is_cpu_write_access) {
665 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
666 }
667 }
668 #endif
669 #ifdef TARGET_HAS_PRECISE_SMC
670 if (current_tb_modified) {
671 /* we generate a block containing just the instruction that
672 modifies the memory, which guarantees that the block cannot
673 modify itself */
674 env->current_tb = NULL;
675 tb_gen_code(env, current_pc, current_cs_base, current_flags,
676 CF_SINGLE_INSN);
677 cpu_resume_from_signal(env, NULL);
678 }
679 #endif
680 }
681
682 /* len must be <= 8 and start must be a multiple of len */
683 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
684 {
685 PageDesc *p;
686 int offset, b;
687 #if 0
688 if (1) {
689 if (loglevel) {
690 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
691 cpu_single_env->mem_write_vaddr, len,
692 cpu_single_env->eip,
693 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
694 }
695 }
696 #endif
697 p = page_find(start >> TARGET_PAGE_BITS);
698 if (!p)
699 return;
700 if (p->code_bitmap) {
701 offset = start & ~TARGET_PAGE_MASK;
702 b = p->code_bitmap[offset >> 3] >> (offset & 7);
703 if (b & ((1 << len) - 1))
704 goto do_invalidate;
705 } else {
706 do_invalidate:
707 tb_invalidate_phys_page_range(start, start + len, 1);
708 }
709 }
710
711 #if !defined(CONFIG_SOFTMMU)
712 static void tb_invalidate_phys_page(target_ulong addr,
713 unsigned long pc, void *puc)
714 {
715 int n, current_flags, current_tb_modified;
716 target_ulong current_pc, current_cs_base;
717 PageDesc *p;
718 TranslationBlock *tb, *current_tb;
719 #ifdef TARGET_HAS_PRECISE_SMC
720 CPUState *env = cpu_single_env;
721 #endif
722
723 addr &= TARGET_PAGE_MASK;
724 p = page_find(addr >> TARGET_PAGE_BITS);
725 if (!p)
726 return;
727 tb = p->first_tb;
728 current_tb_modified = 0;
729 current_tb = NULL;
730 current_pc = 0; /* avoid warning */
731 current_cs_base = 0; /* avoid warning */
732 current_flags = 0; /* avoid warning */
733 #ifdef TARGET_HAS_PRECISE_SMC
734 if (tb && pc != 0) {
735 current_tb = tb_find_pc(pc);
736 }
737 #endif
738 while (tb != NULL) {
739 n = (long)tb & 3;
740 tb = (TranslationBlock *)((long)tb & ~3);
741 #ifdef TARGET_HAS_PRECISE_SMC
742 if (current_tb == tb &&
743 !(current_tb->cflags & CF_SINGLE_INSN)) {
744 /* If we are modifying the current TB, we must stop
745 its execution. We could be more precise by checking
746 that the modification is after the current PC, but it
747 would require a specialized function to partially
748 restore the CPU state */
749
750 current_tb_modified = 1;
751 cpu_restore_state(current_tb, env, pc, puc);
752 #if defined(TARGET_I386)
753 current_flags = env->hflags;
754 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
755 current_cs_base = (target_ulong)env->segs[R_CS].base;
756 current_pc = current_cs_base + env->eip;
757 #else
758 #error unsupported CPU
759 #endif
760 }
761 #endif /* TARGET_HAS_PRECISE_SMC */
762 tb_phys_invalidate(tb, addr);
763 tb = tb->page_next[n];
764 }
765 p->first_tb = NULL;
766 #ifdef TARGET_HAS_PRECISE_SMC
767 if (current_tb_modified) {
768 /* we generate a block containing just the instruction that
769 modifies the memory, which guarantees that the block cannot
770 modify itself */
771 env->current_tb = NULL;
772 tb_gen_code(env, current_pc, current_cs_base, current_flags,
773 CF_SINGLE_INSN);
774 cpu_resume_from_signal(env, puc);
775 }
776 #endif
777 }
778 #endif
779
780 /* add the tb in the target page and protect it if necessary */
781 static inline void tb_alloc_page(TranslationBlock *tb,
782 unsigned int n, unsigned int page_addr)
783 {
784 PageDesc *p;
785 TranslationBlock *last_first_tb;
786
787 tb->page_addr[n] = page_addr;
788 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
789 tb->page_next[n] = p->first_tb;
790 last_first_tb = p->first_tb;
791 p->first_tb = (TranslationBlock *)((long)tb | n);
792 invalidate_page_bitmap(p);
793
794 #if defined(TARGET_HAS_SMC) || 1
795
796 #if defined(CONFIG_USER_ONLY)
797 if (p->flags & PAGE_WRITE) {
798 unsigned long host_start, host_end, addr;
799 int prot;
800
801 /* force the host page as non writable (writes will have a
802 page fault + mprotect overhead) */
803 host_start = page_addr & qemu_host_page_mask;
804 host_end = host_start + qemu_host_page_size;
805 prot = 0;
806 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
807 prot |= page_get_flags(addr);
808 mprotect((void *)host_start, qemu_host_page_size,
809 (prot & PAGE_BITS) & ~PAGE_WRITE);
810 #ifdef DEBUG_TB_INVALIDATE
811 printf("protecting code page: 0x%08lx\n",
812 host_start);
813 #endif
814 p->flags &= ~PAGE_WRITE;
815 }
816 #else
817 /* if some code is already present, then the page is already
818 protected, so we only need to act when the first TB is
819 allocated in a physical page */
820 if (!last_first_tb) {
821 target_ulong virt_addr;
822
823 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
824 tlb_protect_code(cpu_single_env, page_addr, virt_addr);
825 }
826 #endif
827
828 #endif /* TARGET_HAS_SMC */
829 }
830
831 /* Allocate a new translation block. Flush the translation buffer if
832 too many translation blocks or too much generated code. */
833 TranslationBlock *tb_alloc(target_ulong pc)
834 {
835 TranslationBlock *tb;
836
837 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
838 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
839 return NULL;
840 tb = &tbs[nb_tbs++];
841 tb->pc = pc;
842 tb->cflags = 0;
843 return tb;
844 }
845
846 /* add a new TB and link it to the physical page tables. phys_page2 is
847 (-1) to indicate that only one page contains the TB. */
848 void tb_link_phys(TranslationBlock *tb,
849 target_ulong phys_pc, target_ulong phys_page2)
850 {
851 unsigned int h;
852 TranslationBlock **ptb;
853
854 /* add in the physical hash table */
855 h = tb_phys_hash_func(phys_pc);
856 ptb = &tb_phys_hash[h];
857 tb->phys_hash_next = *ptb;
858 *ptb = tb;
859
860 /* add in the page list */
861 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
862 if (phys_page2 != -1)
863 tb_alloc_page(tb, 1, phys_page2);
864 else
865 tb->page_addr[1] = -1;
866
867 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
868 tb->jmp_next[0] = NULL;
869 tb->jmp_next[1] = NULL;
870 #ifdef USE_CODE_COPY
871 tb->cflags &= ~CF_FP_USED;
872 if (tb->cflags & CF_TB_FP_USED)
873 tb->cflags |= CF_FP_USED;
874 #endif
875
876 /* init original jump addresses */
877 if (tb->tb_next_offset[0] != 0xffff)
878 tb_reset_jump(tb, 0);
879 if (tb->tb_next_offset[1] != 0xffff)
880 tb_reset_jump(tb, 1);
881
882 #ifdef DEBUG_TB_CHECK
883 tb_page_check();
884 #endif
885 }
886
887 /* find the TB whose generated code contains tc_ptr, i.e. the tb such
888 that tb->tc_ptr <= tc_ptr < next_tb->tc_ptr. Return NULL if not found */
889 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
890 {
891 int m_min, m_max, m;
892 unsigned long v;
893 TranslationBlock *tb;
894
895 if (nb_tbs <= 0)
896 return NULL;
897 if (tc_ptr < (unsigned long)code_gen_buffer ||
898 tc_ptr >= (unsigned long)code_gen_ptr)
899 return NULL;
900 /* binary search (cf Knuth) */
901 m_min = 0;
902 m_max = nb_tbs - 1;
903 while (m_min <= m_max) {
904 m = (m_min + m_max) >> 1;
905 tb = &tbs[m];
906 v = (unsigned long)tb->tc_ptr;
907 if (v == tc_ptr)
908 return tb;
909 else if (tc_ptr < v) {
910 m_max = m - 1;
911 } else {
912 m_min = m + 1;
913 }
914 }
915 return &tbs[m_max];
916 }
917
918 static void tb_reset_jump_recursive(TranslationBlock *tb);
919
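/* unchain the direct jump 'n' of 'tb' and recursively unchain the TB it
   pointed to, so that execution falls back to the main CPU loop; used by
   cpu_interrupt() to stop a chain of TBs that may currently be running */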
920 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
921 {
922 TranslationBlock *tb1, *tb_next, **ptb;
923 unsigned int n1;
924
925 tb1 = tb->jmp_next[n];
926 if (tb1 != NULL) {
927 /* find head of list */
928 for(;;) {
929 n1 = (long)tb1 & 3;
930 tb1 = (TranslationBlock *)((long)tb1 & ~3);
931 if (n1 == 2)
932 break;
933 tb1 = tb1->jmp_next[n1];
934 }
935 /* we are now sure that tb jumps to tb1 */
936 tb_next = tb1;
937
938 /* remove tb from the jmp_first list */
939 ptb = &tb_next->jmp_first;
940 for(;;) {
941 tb1 = *ptb;
942 n1 = (long)tb1 & 3;
943 tb1 = (TranslationBlock *)((long)tb1 & ~3);
944 if (n1 == n && tb1 == tb)
945 break;
946 ptb = &tb1->jmp_next[n1];
947 }
948 *ptb = tb->jmp_next[n];
949 tb->jmp_next[n] = NULL;
950
951 /* suppress the jump to next tb in generated code */
952 tb_reset_jump(tb, n);
953
954 /* suppress jumps in the tb on which we could have jumped */
955 tb_reset_jump_recursive(tb_next);
956 }
957 }
958
959 static void tb_reset_jump_recursive(TranslationBlock *tb)
960 {
961 tb_reset_jump_recursive2(tb, 0);
962 tb_reset_jump_recursive2(tb, 1);
963 }
964
965 #if defined(TARGET_HAS_ICE)
966 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
967 {
968 target_ulong phys_addr;
969
970 phys_addr = cpu_get_phys_page_debug(env, pc);
971 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
972 }
973 #endif
974
975 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
976 breakpoint is reached */
977 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
978 {
979 #if defined(TARGET_HAS_ICE)
980 int i;
981
982 for(i = 0; i < env->nb_breakpoints; i++) {
983 if (env->breakpoints[i] == pc)
984 return 0;
985 }
986
987 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
988 return -1;
989 env->breakpoints[env->nb_breakpoints++] = pc;
990
991 breakpoint_invalidate(env, pc);
992 return 0;
993 #else
994 return -1;
995 #endif
996 }
997
998 /* remove a breakpoint */
999 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1000 {
1001 #if defined(TARGET_HAS_ICE)
1002 int i;
1003 for(i = 0; i < env->nb_breakpoints; i++) {
1004 if (env->breakpoints[i] == pc)
1005 goto found;
1006 }
1007 return -1;
1008 found:
1009 env->nb_breakpoints--;
1010 if (i < env->nb_breakpoints)
1011 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1012
1013 breakpoint_invalidate(env, pc);
1014 return 0;
1015 #else
1016 return -1;
1017 #endif
1018 }
1019
1020 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1021 CPU loop after each instruction */
1022 void cpu_single_step(CPUState *env, int enabled)
1023 {
1024 #if defined(TARGET_HAS_ICE)
1025 if (env->singlestep_enabled != enabled) {
1026 env->singlestep_enabled = enabled;
1027 /* must flush all the translated code to avoid inconsistencies */
1028 /* XXX: only flush what is necessary */
1029 tb_flush(env);
1030 }
1031 #endif
1032 }
1033
1034 /* enable or disable low-level logging */
1035 void cpu_set_log(int log_flags)
1036 {
1037 loglevel = log_flags;
1038 if (loglevel && !logfile) {
1039 logfile = fopen(logfilename, "w");
1040 if (!logfile) {
1041 perror(logfilename);
1042 _exit(1);
1043 }
1044 #if !defined(CONFIG_SOFTMMU)
1045 /* we must avoid glibc's use of mmap() by setting the buffer "by hand" */
1046 {
1047 static uint8_t logfile_buf[4096];
1048 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1049 }
1050 #else
1051 setvbuf(logfile, NULL, _IOLBF, 0);
1052 #endif
1053 }
1054 }
1055
1056 void cpu_set_log_filename(const char *filename)
1057 {
1058 logfilename = strdup(filename);
1059 }
1060
1061 /* mask must never be zero, except for A20 change call */
1062 void cpu_interrupt(CPUState *env, int mask)
1063 {
1064 TranslationBlock *tb;
1065 static int interrupt_lock;
1066
1067 env->interrupt_request |= mask;
1068 /* if the cpu is currently executing code, we must unlink it and
1069 all the potentially executing TB */
1070 tb = env->current_tb;
1071 if (tb && !testandset(&interrupt_lock)) {
1072 env->current_tb = NULL;
1073 tb_reset_jump_recursive(tb);
1074 interrupt_lock = 0;
1075 }
1076 }
1077
1078 void cpu_reset_interrupt(CPUState *env, int mask)
1079 {
1080 env->interrupt_request &= ~mask;
1081 }
1082
1083 CPULogItem cpu_log_items[] = {
1084 { CPU_LOG_TB_OUT_ASM, "out_asm",
1085 "show generated host assembly code for each compiled TB" },
1086 { CPU_LOG_TB_IN_ASM, "in_asm",
1087 "show target assembly code for each compiled TB" },
1088 { CPU_LOG_TB_OP, "op",
1089 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1090 #ifdef TARGET_I386
1091 { CPU_LOG_TB_OP_OPT, "op_opt",
1092 "show micro ops after optimization for each compiled TB" },
1093 #endif
1094 { CPU_LOG_INT, "int",
1095 "show interrupts/exceptions in short format" },
1096 { CPU_LOG_EXEC, "exec",
1097 "show trace before each executed TB (lots of logs)" },
1098 { CPU_LOG_TB_CPU, "cpu",
1099 "show CPU state before bloc translation" },
1100 #ifdef TARGET_I386
1101 { CPU_LOG_PCALL, "pcall",
1102 "show protected mode far calls/returns/exceptions" },
1103 #endif
1104 #ifdef DEBUG_IOPORT
1105 { CPU_LOG_IOPORT, "ioport",
1106 "show all i/o ports accesses" },
1107 #endif
1108 { 0, NULL, NULL },
1109 };
1110
1111 static int cmp1(const char *s1, int n, const char *s2)
1112 {
1113 if (strlen(s2) != n)
1114 return 0;
1115 return memcmp(s1, s2, n) == 0;
1116 }
1117
1118 /* takes a comma-separated list of log masks. Returns 0 on error. */
1119 int cpu_str_to_log_mask(const char *str)
1120 {
1121 CPULogItem *item;
1122 int mask;
1123 const char *p, *p1;
1124
1125 p = str;
1126 mask = 0;
1127 for(;;) {
1128 p1 = strchr(p, ',');
1129 if (!p1)
1130 p1 = p + strlen(p);
1131 if(cmp1(p,p1-p,"all")) {
1132 for(item = cpu_log_items; item->mask != 0; item++) {
1133 mask |= item->mask;
1134 }
1135 } else {
1136 for(item = cpu_log_items; item->mask != 0; item++) {
1137 if (cmp1(p, p1 - p, item->name))
1138 goto found;
1139 }
1140 return 0;
1141 }
1142 found:
1143 mask |= item->mask;
1144 if (*p1 != ',')
1145 break;
1146 p = p1 + 1;
1147 }
1148 return mask;
1149 }
1150
1151 void cpu_abort(CPUState *env, const char *fmt, ...)
1152 {
1153 va_list ap;
1154
1155 va_start(ap, fmt);
1156 fprintf(stderr, "qemu: fatal: ");
1157 vfprintf(stderr, fmt, ap);
1158 fprintf(stderr, "\n");
1159 #ifdef TARGET_I386
1160 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1161 #else
1162 cpu_dump_state(env, stderr, fprintf, 0);
1163 #endif
1164 va_end(ap);
1165 abort();
1166 }
1167
1168 #if !defined(CONFIG_USER_ONLY)
1169
1170 /* NOTE: if flush_global is true, also flush global entries (not
1171 implemented yet) */
1172 void tlb_flush(CPUState *env, int flush_global)
1173 {
1174 int i;
1175
1176 #if defined(DEBUG_TLB)
1177 printf("tlb_flush:\n");
1178 #endif
1179 /* must reset current TB so that interrupts cannot modify the
1180 links while we are modifying them */
1181 env->current_tb = NULL;
1182
1183 for(i = 0; i < CPU_TLB_SIZE; i++) {
1184 env->tlb_read[0][i].address = -1;
1185 env->tlb_write[0][i].address = -1;
1186 env->tlb_read[1][i].address = -1;
1187 env->tlb_write[1][i].address = -1;
1188 }
1189
1190 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1191
1192 #if !defined(CONFIG_SOFTMMU)
1193 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1194 #endif
1195 #ifdef USE_KQEMU
1196 if (env->kqemu_enabled) {
1197 kqemu_flush(env, flush_global);
1198 }
1199 #endif
1200 tlb_flush_count++;
1201 }
1202
1203 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1204 {
1205 if (addr == (tlb_entry->address &
1206 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1207 tlb_entry->address = -1;
1208 }
1209
1210 void tlb_flush_page(CPUState *env, target_ulong addr)
1211 {
1212 int i;
1213 TranslationBlock *tb;
1214
1215 #if defined(DEBUG_TLB)
1216 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1217 #endif
1218 /* must reset current TB so that interrupts cannot modify the
1219 links while we are modifying them */
1220 env->current_tb = NULL;
1221
1222 addr &= TARGET_PAGE_MASK;
1223 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1224 tlb_flush_entry(&env->tlb_read[0][i], addr);
1225 tlb_flush_entry(&env->tlb_write[0][i], addr);
1226 tlb_flush_entry(&env->tlb_read[1][i], addr);
1227 tlb_flush_entry(&env->tlb_write[1][i], addr);
1228
1229 for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
1230 tb = env->tb_jmp_cache[i];
1231 if (tb &&
1232 ((tb->pc & TARGET_PAGE_MASK) == addr ||
1233 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
1234 env->tb_jmp_cache[i] = NULL;
1235 }
1236 }
1237
1238 #if !defined(CONFIG_SOFTMMU)
1239 if (addr < MMAP_AREA_END)
1240 munmap((void *)addr, TARGET_PAGE_SIZE);
1241 #endif
1242 #ifdef USE_KQEMU
1243 if (env->kqemu_enabled) {
1244 kqemu_flush_page(env, addr);
1245 }
1246 #endif
1247 }
1248
1249 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1250 {
1251 if (addr == (tlb_entry->address &
1252 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1253 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1254 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1255 }
1256 }
1257
1258 /* update the TLBs so that writes to code in the virtual page 'addr'
1259 can be detected */
1260 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
1261 target_ulong vaddr)
1262 {
1263 int i;
1264
1265 vaddr &= TARGET_PAGE_MASK;
1266 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1267 tlb_protect_code1(&env->tlb_write[0][i], vaddr);
1268 tlb_protect_code1(&env->tlb_write[1][i], vaddr);
1269
1270 #ifdef USE_KQEMU
1271 if (env->kqemu_enabled) {
1272 kqemu_set_notdirty(env, ram_addr);
1273 }
1274 #endif
1275 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;
1276
1277 #if !defined(CONFIG_SOFTMMU)
1278 /* NOTE: as we generated the code for this page, it is already at
1279 least readable */
1280 if (vaddr < MMAP_AREA_END)
1281 mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
1282 #endif
1283 }
1284
1285 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1286 checked for self-modifying code */
1287 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1288 target_ulong vaddr)
1289 {
1290 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1291 }
1292
1293 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1294 unsigned long start, unsigned long length)
1295 {
1296 unsigned long addr;
1297 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1298 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1299 if ((addr - start) < length) {
1300 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1301 }
1302 }
1303 }
1304
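/* clear the given dirty flag bits for all RAM pages in [start, end) and
   update the write TLB entries so that the next store to those pages goes
   through the slow path and sets the dirty bits again */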
1305 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1306 int dirty_flags)
1307 {
1308 CPUState *env;
1309 unsigned long length, start1;
1310 int i, mask, len;
1311 uint8_t *p;
1312
1313 start &= TARGET_PAGE_MASK;
1314 end = TARGET_PAGE_ALIGN(end);
1315
1316 length = end - start;
1317 if (length == 0)
1318 return;
1319 len = length >> TARGET_PAGE_BITS;
1320 env = cpu_single_env;
1321 #ifdef USE_KQEMU
1322 if (env->kqemu_enabled) {
1323 ram_addr_t addr;
1324 addr = start;
1325 for(i = 0; i < len; i++) {
1326 kqemu_set_notdirty(env, addr);
1327 addr += TARGET_PAGE_SIZE;
1328 }
1329 }
1330 #endif
1331 mask = ~dirty_flags;
1332 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1333 for(i = 0; i < len; i++)
1334 p[i] &= mask;
1335
1336 /* we modify the TLB cache so that the dirty bit will be set again
1337 when accessing the range */
1338 start1 = start + (unsigned long)phys_ram_base;
1339 for(i = 0; i < CPU_TLB_SIZE; i++)
1340 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1341 for(i = 0; i < CPU_TLB_SIZE; i++)
1342 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1343
1344 #if !defined(CONFIG_SOFTMMU)
1345 /* XXX: this is expensive */
1346 {
1347 VirtPageDesc *p;
1348 int j;
1349 target_ulong addr;
1350
1351 for(i = 0; i < L1_SIZE; i++) {
1352 p = l1_virt_map[i];
1353 if (p) {
1354 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1355 for(j = 0; j < L2_SIZE; j++) {
1356 if (p->valid_tag == virt_valid_tag &&
1357 p->phys_addr >= start && p->phys_addr < end &&
1358 (p->prot & PROT_WRITE)) {
1359 if (addr < MMAP_AREA_END) {
1360 mprotect((void *)addr, TARGET_PAGE_SIZE,
1361 p->prot & ~PROT_WRITE);
1362 }
1363 }
1364 addr += TARGET_PAGE_SIZE;
1365 p++;
1366 }
1367 }
1368 }
1369 }
1370 #endif
1371 }
1372
1373 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1374 {
1375 ram_addr_t ram_addr;
1376
1377 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1378 ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
1379 tlb_entry->addend - (unsigned long)phys_ram_base;
1380 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1381 tlb_entry->address |= IO_MEM_NOTDIRTY;
1382 }
1383 }
1384 }
1385
1386 /* update the TLB according to the current state of the dirty bits */
1387 void cpu_tlb_update_dirty(CPUState *env)
1388 {
1389 int i;
1390 for(i = 0; i < CPU_TLB_SIZE; i++)
1391 tlb_update_dirty(&env->tlb_write[0][i]);
1392 for(i = 0; i < CPU_TLB_SIZE; i++)
1393 tlb_update_dirty(&env->tlb_write[1][i]);
1394 }
1395
1396 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1397 unsigned long start)
1398 {
1399 unsigned long addr;
1400 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1401 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1402 if (addr == start) {
1403 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1404 }
1405 }
1406 }
1407
1408 /* update the TLB corresponding to virtual page vaddr and phys addr
1409 addr so that it is no longer dirty */
1410 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1411 {
1412 CPUState *env = cpu_single_env;
1413 int i;
1414
1415 addr &= TARGET_PAGE_MASK;
1416 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1417 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1418 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1419 }
1420
1421 /* add a new TLB entry. At most one entry for a given virtual address
1422 is permitted. Return 0 if OK or 2 if the page could not be mapped
1423 (can only happen in non SOFTMMU mode for I/O pages or pages
1424 conflicting with the host address space). */
1425 int tlb_set_page(CPUState *env, target_ulong vaddr,
1426 target_phys_addr_t paddr, int prot,
1427 int is_user, int is_softmmu)
1428 {
1429 PhysPageDesc *p;
1430 unsigned long pd;
1431 unsigned int index;
1432 target_ulong address;
1433 target_phys_addr_t addend;
1434 int ret;
1435
1436 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1437 if (!p) {
1438 pd = IO_MEM_UNASSIGNED;
1439 } else {
1440 pd = p->phys_offset;
1441 }
1442 #if defined(DEBUG_TLB)
1443 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1444 vaddr, paddr, prot, is_user, is_softmmu, pd);
1445 #endif
1446
1447 ret = 0;
1448 #if !defined(CONFIG_SOFTMMU)
1449 if (is_softmmu)
1450 #endif
1451 {
1452 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1453 /* IO memory case */
1454 address = vaddr | pd;
1455 addend = paddr;
1456 } else {
1457 /* standard memory */
1458 address = vaddr;
1459 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1460 }
1461
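/* the TLB stores addend = target_address_base - vaddr, so the fast path
   recovers the host address (RAM case) or the physical address (I/O case)
   with a single addition: vaddr + addend */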
1462 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1463 addend -= vaddr;
1464 if (prot & PAGE_READ) {
1465 env->tlb_read[is_user][index].address = address;
1466 env->tlb_read[is_user][index].addend = addend;
1467 } else {
1468 env->tlb_read[is_user][index].address = -1;
1469 env->tlb_read[is_user][index].addend = -1;
1470 }
1471 if (prot & PAGE_WRITE) {
1472 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1473 /* ROM: access is ignored (same as unassigned) */
1474 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1475 env->tlb_write[is_user][index].addend = addend;
1476 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1477 !cpu_physical_memory_is_dirty(pd)) {
1478 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1479 env->tlb_write[is_user][index].addend = addend;
1480 } else {
1481 env->tlb_write[is_user][index].address = address;
1482 env->tlb_write[is_user][index].addend = addend;
1483 }
1484 } else {
1485 env->tlb_write[is_user][index].address = -1;
1486 env->tlb_write[is_user][index].addend = -1;
1487 }
1488 }
1489 #if !defined(CONFIG_SOFTMMU)
1490 else {
1491 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1492 /* IO access: no mapping is done as it will be handled by the
1493 soft MMU */
1494 if (!(env->hflags & HF_SOFTMMU_MASK))
1495 ret = 2;
1496 } else {
1497 void *map_addr;
1498
1499 if (vaddr >= MMAP_AREA_END) {
1500 ret = 2;
1501 } else {
1502 if (prot & PROT_WRITE) {
1503 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1504 #if defined(TARGET_HAS_SMC) || 1
1505 first_tb ||
1506 #endif
1507 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1508 !cpu_physical_memory_is_dirty(pd))) {
1509 /* ROM: we behave as if code were inside */
1510 /* if code is present, we map the page read-only and save the
1511 original mapping */
1512 VirtPageDesc *vp;
1513
1514 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1515 vp->phys_addr = pd;
1516 vp->prot = prot;
1517 vp->valid_tag = virt_valid_tag;
1518 prot &= ~PAGE_WRITE;
1519 }
1520 }
1521 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1522 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1523 if (map_addr == MAP_FAILED) {
1524 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1525 paddr, vaddr);
1526 }
1527 }
1528 }
1529 }
1530 #endif
1531 return ret;
1532 }
1533
1534 /* called from signal handler: invalidate the code and unprotect the
1535 page. Return TRUE if the fault was successfully handled. */
1536 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1537 {
1538 #if !defined(CONFIG_SOFTMMU)
1539 VirtPageDesc *vp;
1540
1541 #if defined(DEBUG_TLB)
1542 printf("page_unprotect: addr=0x%08x\n", addr);
1543 #endif
1544 addr &= TARGET_PAGE_MASK;
1545
1546 /* if it is not mapped, no need to worry here */
1547 if (addr >= MMAP_AREA_END)
1548 return 0;
1549 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1550 if (!vp)
1551 return 0;
1552 /* NOTE: in this case, validate_tag is _not_ tested as it
1553 validates only the code TLB */
1554 if (vp->valid_tag != virt_valid_tag)
1555 return 0;
1556 if (!(vp->prot & PAGE_WRITE))
1557 return 0;
1558 #if defined(DEBUG_TLB)
1559 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1560 addr, vp->phys_addr, vp->prot);
1561 #endif
1562 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1563 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1564 (unsigned long)addr, vp->prot);
1565 /* set the dirty bit */
1566 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1567 /* flush the code inside */
1568 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1569 return 1;
1570 #else
1571 return 0;
1572 #endif
1573 }
1574
1575 #else
1576
1577 void tlb_flush(CPUState *env, int flush_global)
1578 {
1579 }
1580
1581 void tlb_flush_page(CPUState *env, target_ulong addr)
1582 {
1583 }
1584
1585 int tlb_set_page(CPUState *env, target_ulong vaddr,
1586 target_phys_addr_t paddr, int prot,
1587 int is_user, int is_softmmu)
1588 {
1589 return 0;
1590 }
1591
1592 /* dump memory mappings */
1593 void page_dump(FILE *f)
1594 {
1595 unsigned long start, end;
1596 int i, j, prot, prot1;
1597 PageDesc *p;
1598
1599 fprintf(f, "%-8s %-8s %-8s %s\n",
1600 "start", "end", "size", "prot");
1601 start = -1;
1602 end = -1;
1603 prot = 0;
1604 for(i = 0; i <= L1_SIZE; i++) {
1605 if (i < L1_SIZE)
1606 p = l1_map[i];
1607 else
1608 p = NULL;
1609 for(j = 0;j < L2_SIZE; j++) {
1610 if (!p)
1611 prot1 = 0;
1612 else
1613 prot1 = p[j].flags;
1614 if (prot1 != prot) {
1615 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1616 if (start != -1) {
1617 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1618 start, end, end - start,
1619 prot & PAGE_READ ? 'r' : '-',
1620 prot & PAGE_WRITE ? 'w' : '-',
1621 prot & PAGE_EXEC ? 'x' : '-');
1622 }
1623 if (prot1 != 0)
1624 start = end;
1625 else
1626 start = -1;
1627 prot = prot1;
1628 }
1629 if (!p)
1630 break;
1631 }
1632 }
1633 }
1634
1635 int page_get_flags(unsigned long address)
1636 {
1637 PageDesc *p;
1638
1639 p = page_find(address >> TARGET_PAGE_BITS);
1640 if (!p)
1641 return 0;
1642 return p->flags;
1643 }
1644
1645 /* modify the flags of a page and invalidate the code if
1646 necessary. The flag PAGE_WRITE_ORG is set automatically
1647 depending on PAGE_WRITE */
1648 void page_set_flags(unsigned long start, unsigned long end, int flags)
1649 {
1650 PageDesc *p;
1651 unsigned long addr;
1652
1653 start = start & TARGET_PAGE_MASK;
1654 end = TARGET_PAGE_ALIGN(end);
1655 if (flags & PAGE_WRITE)
1656 flags |= PAGE_WRITE_ORG;
1657 spin_lock(&tb_lock);
1658 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1659 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1660 /* if the page was write protected and is being made writable,
1661 then we invalidate any translated code inside it */
1662 if (!(p->flags & PAGE_WRITE) &&
1663 (flags & PAGE_WRITE) &&
1664 p->first_tb) {
1665 tb_invalidate_phys_page(addr, 0, NULL);
1666 }
1667 p->flags = flags;
1668 }
1669 spin_unlock(&tb_lock);
1670 }
1671
1672 /* called from signal handler: invalidate the code and unprotect the
1673 page. Return TRUE if the fault was successfully handled. */
1674 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1675 {
1676 unsigned int page_index, prot, pindex;
1677 PageDesc *p, *p1;
1678 unsigned long host_start, host_end, addr;
1679
1680 host_start = address & qemu_host_page_mask;
1681 page_index = host_start >> TARGET_PAGE_BITS;
1682 p1 = page_find(page_index);
1683 if (!p1)
1684 return 0;
1685 host_end = host_start + qemu_host_page_size;
1686 p = p1;
1687 prot = 0;
1688 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1689 prot |= p->flags;
1690 p++;
1691 }
1692 /* if the page was really writable, then we change its
1693 protection back to writable */
1694 if (prot & PAGE_WRITE_ORG) {
1695 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1696 if (!(p1[pindex].flags & PAGE_WRITE)) {
1697 mprotect((void *)host_start, qemu_host_page_size,
1698 (prot & PAGE_BITS) | PAGE_WRITE);
1699 p1[pindex].flags |= PAGE_WRITE;
1700 /* and since the content will be modified, we must invalidate
1701 the corresponding translated code. */
1702 tb_invalidate_phys_page(address, pc, puc);
1703 #ifdef DEBUG_TB_CHECK
1704 tb_invalidate_check(address);
1705 #endif
1706 return 1;
1707 }
1708 }
1709 return 0;
1710 }
1711
1712 /* call this function when system calls directly modify a memory area */
1713 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1714 {
1715 unsigned long start, end, addr;
1716
1717 start = (unsigned long)data;
1718 end = start + data_size;
1719 start &= TARGET_PAGE_MASK;
1720 end = TARGET_PAGE_ALIGN(end);
1721 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1722 page_unprotect(addr, 0, NULL);
1723 }
1724 }
1725
1726 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1727 {
1728 }
1729 #endif /* defined(CONFIG_USER_ONLY) */
1730
1731 /* register physical memory. 'size' must be a multiple of the target
1732 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1733 io memory page */
1734 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1735 unsigned long size,
1736 unsigned long phys_offset)
1737 {
1738 target_phys_addr_t addr, end_addr;
1739 PhysPageDesc *p;
1740
1741 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1742 end_addr = start_addr + size;
1743 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1744 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1745 p->phys_offset = phys_offset;
1746 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1747 phys_offset += TARGET_PAGE_SIZE;
1748 }
1749 }
1750
1751 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1752 {
1753 return 0;
1754 }
1755
1756 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1757 {
1758 }
1759
1760 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1761 unassigned_mem_readb,
1762 unassigned_mem_readb,
1763 unassigned_mem_readb,
1764 };
1765
1766 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1767 unassigned_mem_writeb,
1768 unassigned_mem_writeb,
1769 unassigned_mem_writeb,
1770 };
1771
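/* "not dirty" slow-path write handlers: a store to a RAM page whose
   CODE_DIRTY_FLAG is clear lands here so that any translated code derived
   from the page can be invalidated before the write is performed */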
1772 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1773 {
1774 unsigned long ram_addr;
1775 int dirty_flags;
1776 ram_addr = addr - (unsigned long)phys_ram_base;
1777 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1778 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1779 #if !defined(CONFIG_USER_ONLY)
1780 tb_invalidate_phys_page_fast(ram_addr, 1);
1781 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1782 #endif
1783 }
1784 stb_p((uint8_t *)(long)addr, val);
1785 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1786 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1787 /* we remove the notdirty callback only if the code has been
1788 flushed */
1789 if (dirty_flags == 0xff)
1790 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1791 }
1792
1793 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1794 {
1795 unsigned long ram_addr;
1796 int dirty_flags;
1797 ram_addr = addr - (unsigned long)phys_ram_base;
1798 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1799 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1800 #if !defined(CONFIG_USER_ONLY)
1801 tb_invalidate_phys_page_fast(ram_addr, 2);
1802 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1803 #endif
1804 }
1805 stw_p((uint8_t *)(long)addr, val);
1806 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1807 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1808 /* we remove the notdirty callback only if the code has been
1809 flushed */
1810 if (dirty_flags == 0xff)
1811 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1812 }
1813
1814 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1815 {
1816 unsigned long ram_addr;
1817 int dirty_flags;
1818 ram_addr = addr - (unsigned long)phys_ram_base;
1819 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1820 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
1821 #if !defined(CONFIG_USER_ONLY)
1822 tb_invalidate_phys_page_fast(ram_addr, 4);
1823 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1824 #endif
1825 }
1826 stl_p((uint8_t *)(long)addr, val);
1827 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1828 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1829 /* we remove the notdirty callback only if the code has been
1830 flushed */
1831 if (dirty_flags == 0xff)
1832 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1833 }
1834
1835 static CPUReadMemoryFunc *error_mem_read[3] = {
1836 NULL, /* never used */
1837 NULL, /* never used */
1838 NULL, /* never used */
1839 };
1840
1841 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1842 notdirty_mem_writeb,
1843 notdirty_mem_writew,
1844 notdirty_mem_writel,
1845 };
1846
1847 static void io_mem_init(void)
1848 {
1849 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
1850 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1851 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
1852 io_mem_nb = 5;
1853
1854 /* alloc dirty bits array */
1855 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
1856 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
1857 }
1858
1859 /* mem_read and mem_write are arrays of functions containing the
1860 function to access byte (index 0), word (index 1) and dword (index
1861 2). All functions must be supplied. If io_index is non-zero, the
1862 corresponding I/O zone is modified. If it is zero, a new I/O zone is
1863 allocated. The return value can be used with
1864 cpu_register_physical_memory(). (-1) is returned on error. */
1865 int cpu_register_io_memory(int io_index,
1866 CPUReadMemoryFunc **mem_read,
1867 CPUWriteMemoryFunc **mem_write,
1868 void *opaque)
1869 {
1870 int i;
1871
1872 if (io_index <= 0) {
1873 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1874 return -1;
1875 io_index = io_mem_nb++;
1876 } else {
1877 if (io_index >= IO_MEM_NB_ENTRIES)
1878 return -1;
1879 }
1880
1881 for(i = 0;i < 3; i++) {
1882 io_mem_read[io_index][i] = mem_read[i];
1883 io_mem_write[io_index][i] = mem_write[i];
1884 }
1885 io_mem_opaque[io_index] = opaque;
1886 return io_index << IO_MEM_SHIFT;
1887 }
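/* A minimal usage sketch (hypothetical device; the names mydev_read,
   mydev_write, s and the addresses are illustrative only):

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0xfe000000, 0x1000, io);

   i.e. register the read/write callback tables, then map the returned
   handle over the desired physical address range. */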
1888
1889 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
1890 {
1891 return io_mem_write[io_index >> IO_MEM_SHIFT];
1892 }
1893
1894 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
1895 {
1896 return io_mem_read[io_index >> IO_MEM_SHIFT];
1897 }
1898
1899 /* physical memory access (slow version, mainly for debug) */
1900 #if defined(CONFIG_USER_ONLY)
1901 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1902 int len, int is_write)
1903 {
1904 int l, flags;
1905 target_ulong page;
1906
1907 while (len > 0) {
1908 page = addr & TARGET_PAGE_MASK;
1909 l = (page + TARGET_PAGE_SIZE) - addr;
1910 if (l > len)
1911 l = len;
1912 flags = page_get_flags(page);
1913 if (!(flags & PAGE_VALID))
1914 return;
1915 if (is_write) {
1916 if (!(flags & PAGE_WRITE))
1917 return;
1918 memcpy((uint8_t *)addr, buf, l);
1919 } else {
1920 if (!(flags & PAGE_READ))
1921 return;
1922 memcpy(buf, (uint8_t *)addr, l);
1923 }
1924 len -= l;
1925 buf += l;
1926 addr += l;
1927 }
1928 }
1929
1930 #else
1931 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1932 int len, int is_write)
1933 {
1934 int l, io_index;
1935 uint8_t *ptr;
1936 uint32_t val;
1937 target_phys_addr_t page;
1938 unsigned long pd;
1939 PhysPageDesc *p;
1940
1941 while (len > 0) {
1942 page = addr & TARGET_PAGE_MASK;
1943 l = (page + TARGET_PAGE_SIZE) - addr;
1944 if (l > len)
1945 l = len;
1946 p = phys_page_find(page >> TARGET_PAGE_BITS);
1947 if (!p) {
1948 pd = IO_MEM_UNASSIGNED;
1949 } else {
1950 pd = p->phys_offset;
1951 }
1952
1953 if (is_write) {
1954 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
1955 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1956 if (l >= 4 && ((addr & 3) == 0)) {
1957 /* 32 bit write access */
1958 val = ldl_p(buf);
1959 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
1960 l = 4;
1961 } else if (l >= 2 && ((addr & 1) == 0)) {
1962 /* 16 bit write access */
1963 val = lduw_p(buf);
1964 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
1965 l = 2;
1966 } else {
1967 /* 8 bit write access */
1968 val = ldub_p(buf);
1969 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
1970 l = 1;
1971 }
1972 } else {
1973 unsigned long addr1;
1974 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
1975 /* RAM case */
1976 ptr = phys_ram_base + addr1;
1977 memcpy(ptr, buf, l);
1978 if (!cpu_physical_memory_is_dirty(addr1)) {
1979 /* invalidate code */
1980 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1981 /* set dirty bit */
1982 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
1983 (0xff & ~CODE_DIRTY_FLAG);
1984 }
1985 }
1986 } else {
1987 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1988 /* I/O case */
1989 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1990 if (l >= 4 && ((addr & 3) == 0)) {
1991 /* 32 bit read access */
1992 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1993 stl_p(buf, val);
1994 l = 4;
1995 } else if (l >= 2 && ((addr & 1) == 0)) {
1996 /* 16 bit read access */
1997 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1998 stw_p(buf, val);
1999 l = 2;
2000 } else {
2001 /* 8 bit read access */
2002 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2003 stb_p(buf, val);
2004 l = 1;
2005 }
2006 } else {
2007 /* RAM case */
2008 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2009 (addr & ~TARGET_PAGE_MASK);
2010 memcpy(buf, ptr, l);
2011 }
2012 }
2013 len -= l;
2014 buf += l;
2015 addr += l;
2016 }
2017 }
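/* Usage sketch (hypothetical): a bounce-buffer copy between two guest
   physical ranges built on the slow path above, using the
   cpu_physical_memory_read()/write() wrappers. The function name and
   chunk size are invented for illustration. */
#if 0
static void example_phys_copy(target_phys_addr_t dst,
                              target_phys_addr_t src, int len)
{
    uint8_t tmp[TARGET_PAGE_SIZE];
    int l;

    while (len > 0) {
        l = len;
        if (l > (int)sizeof(tmp))
            l = sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);  /* is_write == 0 */
        cpu_physical_memory_write(dst, tmp, l); /* is_write == 1 */
        src += l;
        dst += l;
        len -= l;
    }
}
#endif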
2018
2019 /* warning: addr must be aligned */
2020 uint32_t ldl_phys(target_phys_addr_t addr)
2021 {
2022 int io_index;
2023 uint8_t *ptr;
2024 uint32_t val;
2025 unsigned long pd;
2026 PhysPageDesc *p;
2027
2028 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2029 if (!p) {
2030 pd = IO_MEM_UNASSIGNED;
2031 } else {
2032 pd = p->phys_offset;
2033 }
2034
2035 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2036 /* I/O case */
2037 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2038 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2039 } else {
2040 /* RAM case */
2041 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2042 (addr & ~TARGET_PAGE_MASK);
2043 val = ldl_p(ptr);
2044 }
2045 return val;
2046 }
2047
2048 /* XXX: optimize */
2049 uint32_t ldub_phys(target_phys_addr_t addr)
2050 {
2051 uint8_t val;
2052 cpu_physical_memory_read(addr, &val, 1);
2053 return val;
2054 }
2055
2056 /* XXX: optimize */
2057 uint32_t lduw_phys(target_phys_addr_t addr)
2058 {
2059 uint16_t val;
2060 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2061 return tswap16(val);
2062 }
2063
2064 /* XXX: optimize */
2065 uint64_t ldq_phys(target_phys_addr_t addr)
2066 {
2067 uint64_t val;
2068 cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
2069 return tswap64(val);
2070 }
2071
2072 /* warning: addr must be aligned. The ram page is not marked as dirty
2073 and the code inside it is not invalidated. This is useful when the
2074 dirty bits are used to track modified PTEs */
2075 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2076 {
2077 int io_index;
2078 uint8_t *ptr;
2079 unsigned long pd;
2080 PhysPageDesc *p;
2081
2082 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2083 if (!p) {
2084 pd = IO_MEM_UNASSIGNED;
2085 } else {
2086 pd = p->phys_offset;
2087 }
2088
2089 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2090 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2091 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2092 } else {
2093 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2094 (addr & ~TARGET_PAGE_MASK);
2095 stl_p(ptr, val);
2096 }
2097 }
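/* Usage sketch (hypothetical): the PTE tracking case mentioned in the
   comment above. A target MMU helper can set the ACCESSED bit of a
   guest page table entry without flagging the RAM page as modified.
   PG_ACCESSED_MASK is an i386 target constant, used here purely as an
   illustration. */
#if 0
static void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte;

    pte = ldl_phys(pte_addr);
    if (!(pte & PG_ACCESSED_MASK))
        stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
}
#endif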
2098
2099 /* warning: addr must be aligned */
2100 void stl_phys(target_phys_addr_t addr, uint32_t val)
2101 {
2102 int io_index;
2103 uint8_t *ptr;
2104 unsigned long pd;
2105 PhysPageDesc *p;
2106
2107 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2108 if (!p) {
2109 pd = IO_MEM_UNASSIGNED;
2110 } else {
2111 pd = p->phys_offset;
2112 }
2113
2114 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2115 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2116 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2117 } else {
2118 unsigned long addr1;
2119 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2120 /* RAM case */
2121 ptr = phys_ram_base + addr1;
2122 stl_p(ptr, val);
2123 if (!cpu_physical_memory_is_dirty(addr1)) {
2124 /* invalidate code */
2125 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2126 /* set dirty bit */
2127 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2128 (0xff & ~CODE_DIRTY_FLAG);
2129 }
2130 }
2131 }
2132
2133 /* XXX: optimize */
2134 void stb_phys(target_phys_addr_t addr, uint32_t val)
2135 {
2136 uint8_t v = val;
2137 cpu_physical_memory_write(addr, &v, 1);
2138 }
2139
2140 /* XXX: optimize */
2141 void stw_phys(target_phys_addr_t addr, uint32_t val)
2142 {
2143 uint16_t v = tswap16(val);
2144 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2145 }
2146
2147 /* XXX: optimize */
2148 void stq_phys(target_phys_addr_t addr, uint64_t val)
2149 {
2150 val = tswap64(val);
2151 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2152 }
2153
2154 #endif
2155
2156 /* virtual memory access for debug */
2157 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2158 uint8_t *buf, int len, int is_write)
2159 {
2160 int l;
2161 target_ulong page, phys_addr;
2162
2163 while (len > 0) {
2164 page = addr & TARGET_PAGE_MASK;
2165 phys_addr = cpu_get_phys_page_debug(env, page);
2166 /* if no physical page mapped, return an error */
2167 if (phys_addr == -1)
2168 return -1;
2169 l = (page + TARGET_PAGE_SIZE) - addr;
2170 if (l > len)
2171 l = len;
2172 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2173 buf, l, is_write);
2174 len -= l;
2175 buf += l;
2176 addr += l;
2177 }
2178 return 0;
2179 }
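/* Usage sketch (hypothetical): how a debugger front end such as the gdb
   stub can read a 32 bit value from guest virtual memory. The helper
   name is invented for illustration. */
#if 0
static int example_read_virt_u32(CPUState *env, target_ulong vaddr,
                                 uint32_t *out)
{
    uint8_t buf[4];

    /* is_write == 0: read access; -1 means the page is not mapped */
    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;
    *out = ldl_p(buf);
    return 0;
}
#endif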
2180
2181 void dump_exec_info(FILE *f,
2182 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2183 {
2184 int i, target_code_size, max_target_code_size;
2185 int direct_jmp_count, direct_jmp2_count, cross_page;
2186 TranslationBlock *tb;
2187
2188 target_code_size = 0;
2189 max_target_code_size = 0;
2190 cross_page = 0;
2191 direct_jmp_count = 0;
2192 direct_jmp2_count = 0;
2193 for(i = 0; i < nb_tbs; i++) {
2194 tb = &tbs[i];
2195 target_code_size += tb->size;
2196 if (tb->size > max_target_code_size)
2197 max_target_code_size = tb->size;
2198 if (tb->page_addr[1] != -1)
2199 cross_page++;
2200 if (tb->tb_next_offset[0] != 0xffff) {
2201 direct_jmp_count++;
2202 if (tb->tb_next_offset[1] != 0xffff) {
2203 direct_jmp2_count++;
2204 }
2205 }
2206 }
2207 /* XXX: avoid using doubles ? */
2208 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2209 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2210 nb_tbs ? target_code_size / nb_tbs : 0,
2211 max_target_code_size);
2212 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2213 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2214 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2215 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2216 cross_page,
2217 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2218 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2219 direct_jmp_count,
2220 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2221 direct_jmp2_count,
2222 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2223 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2224 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2225 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2226 }
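/* Usage sketch (hypothetical): fprintf() matches the cpu_fprintf
   prototype, so the statistics can be dumped directly to a stdio
   stream, e.g. from a monitor or debug command. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif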
2227
2228 #if !defined(CONFIG_USER_ONLY)
2229
2230 #define MMUSUFFIX _cmmu
2231 #define GETPC() NULL
2232 #define env cpu_single_env
2233 #define SOFTMMU_CODE_ACCESS
2234
2235 #define SHIFT 0
2236 #include "softmmu_template.h"
2237
2238 #define SHIFT 1
2239 #include "softmmu_template.h"
2240
2241 #define SHIFT 2
2242 #include "softmmu_template.h"
2243
2244 #define SHIFT 3
2245 #include "softmmu_template.h"
2246
2247 #undef env
2248
2249 #endif