git.proxmox.com Git - qemu.git/blob - exec.c
merge self modifying code handling in dirty ram page mechanism
[qemu.git] / exec.c
1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37
38 //#define DEBUG_TB_INVALIDATE
39 //#define DEBUG_FLUSH
40 //#define DEBUG_TLB
41
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
45
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48
49 #define SMC_BITMAP_USE_THRESHOLD 10
50
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
53
54 #if defined(TARGET_SPARC64)
55 #define TARGET_PHYS_ADDR_SPACE_BITS 41
56 #elif defined(TARGET_PPC64)
57 #define TARGET_PHYS_ADDR_SPACE_BITS 42
58 #else
59 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60 #define TARGET_PHYS_ADDR_SPACE_BITS 32
61 #endif
62
63 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
64 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
65 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
66 int nb_tbs;
67 /* any access to the tbs or the page table must use this lock */
68 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
69
70 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
71 uint8_t *code_gen_ptr;
72
73 int phys_ram_size;
74 int phys_ram_fd;
75 uint8_t *phys_ram_base;
76 uint8_t *phys_ram_dirty;
77
78 typedef struct PageDesc {
79 /* list of TBs intersecting this ram page */
80 TranslationBlock *first_tb;
81     /* in order to optimize self modifying code handling, we count the number
82        of write accesses to a given page; above a threshold a code bitmap is used */
83 unsigned int code_write_count;
84 uint8_t *code_bitmap;
85 #if defined(CONFIG_USER_ONLY)
86 unsigned long flags;
87 #endif
88 } PageDesc;
89
90 typedef struct PhysPageDesc {
91 /* offset in host memory of the page + io_index in the low 12 bits */
92 uint32_t phys_offset;
93 } PhysPageDesc;
94
95 /* Note: the VirtPage handling is obsolete and will be removed
96 ASAP */
97 typedef struct VirtPageDesc {
98 /* physical address of code page. It is valid only if 'valid_tag'
99 matches 'virt_valid_tag' */
100 target_ulong phys_addr;
101 unsigned int valid_tag;
102 #if !defined(CONFIG_SOFTMMU)
103 /* original page access rights. It is valid only if 'valid_tag'
104 matches 'virt_valid_tag' */
105 unsigned int prot;
106 #endif
107 } VirtPageDesc;
108
109 #define L2_BITS 10
110 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
111
112 #define L1_SIZE (1 << L1_BITS)
113 #define L2_SIZE (1 << L2_BITS)
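/* Example: with a 4 KB target page (TARGET_PAGE_BITS == 12), L1_BITS is 10,
   so a 32 bit virtual page number splits into 10 L1 bits and 10 L2 bits:
   l1_map[] has 1024 entries, each pointing to an array of 1024 PageDescs. */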
114
115 static void io_mem_init(void);
116
117 unsigned long qemu_real_host_page_size;
118 unsigned long qemu_host_page_bits;
119 unsigned long qemu_host_page_size;
120 unsigned long qemu_host_page_mask;
121
122 /* XXX: for system emulation, it could just be an array */
123 static PageDesc *l1_map[L1_SIZE];
124 PhysPageDesc **l1_phys_map;
125
126 #if !defined(CONFIG_USER_ONLY)
127 #if TARGET_LONG_BITS > 32
128 #define VIRT_L_BITS 9
129 #define VIRT_L_SIZE (1 << VIRT_L_BITS)
130 static void *l1_virt_map[VIRT_L_SIZE];
131 #else
132 static VirtPageDesc *l1_virt_map[L1_SIZE];
133 #endif
134 static unsigned int virt_valid_tag;
135 #endif
136
137 /* io memory support */
138 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
139 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
140 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
141 static int io_mem_nb;
142
143 /* log support */
144 char *logfilename = "/tmp/qemu.log";
145 FILE *logfile;
146 int loglevel;
147
148 /* statistics */
149 static int tlb_flush_count;
150 static int tb_flush_count;
151 static int tb_phys_invalidate_count;
152
153 static void page_init(void)
154 {
155     /* NOTE: we can always assume that qemu_host_page_size >=
156 TARGET_PAGE_SIZE */
157 #ifdef _WIN32
158 {
159 SYSTEM_INFO system_info;
160 DWORD old_protect;
161
162 GetSystemInfo(&system_info);
163 qemu_real_host_page_size = system_info.dwPageSize;
164
165 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
166 PAGE_EXECUTE_READWRITE, &old_protect);
167 }
168 #else
169 qemu_real_host_page_size = getpagesize();
170 {
171 unsigned long start, end;
172
173 start = (unsigned long)code_gen_buffer;
174 start &= ~(qemu_real_host_page_size - 1);
175
176 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
177 end += qemu_real_host_page_size - 1;
178 end &= ~(qemu_real_host_page_size - 1);
179
180 mprotect((void *)start, end - start,
181 PROT_READ | PROT_WRITE | PROT_EXEC);
182 }
183 #endif
184
185 if (qemu_host_page_size == 0)
186 qemu_host_page_size = qemu_real_host_page_size;
187 if (qemu_host_page_size < TARGET_PAGE_SIZE)
188 qemu_host_page_size = TARGET_PAGE_SIZE;
189 qemu_host_page_bits = 0;
190 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
191 qemu_host_page_bits++;
192 qemu_host_page_mask = ~(qemu_host_page_size - 1);
193 #if !defined(CONFIG_USER_ONLY)
194 virt_valid_tag = 1;
195 #endif
196 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
197 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
198 }
199
200 static inline PageDesc *page_find_alloc(unsigned int index)
201 {
202 PageDesc **lp, *p;
203
204 lp = &l1_map[index >> L2_BITS];
205 p = *lp;
206 if (!p) {
207 /* allocate if not found */
208 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
209 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
210 *lp = p;
211 }
212 return p + (index & (L2_SIZE - 1));
213 }
214
215 static inline PageDesc *page_find(unsigned int index)
216 {
217 PageDesc *p;
218
219 p = l1_map[index >> L2_BITS];
220 if (!p)
221 return 0;
222 return p + (index & (L2_SIZE - 1));
223 }
224
225 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
226 {
227 void **lp, **p;
228
229 p = (void **)l1_phys_map;
230 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
231
232 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
233 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
234 #endif
235 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
236 p = *lp;
237 if (!p) {
238 /* allocate if not found */
239 if (!alloc)
240 return NULL;
241 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
242 memset(p, 0, sizeof(void *) * L1_SIZE);
243 *lp = p;
244 }
245 #endif
246 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
247 p = *lp;
248 if (!p) {
249 /* allocate if not found */
250 if (!alloc)
251 return NULL;
252 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
253 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
254 *lp = p;
255 }
256 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
257 }
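/* The physical page table mirrors l1_map: an L1 array of pointers to L2 arrays
   of PhysPageDesc, with an extra top level inserted when
   TARGET_PHYS_ADDR_SPACE_BITS > 32. With alloc == 0 the lookup returns NULL
   instead of allocating missing levels. */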
258
259 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
260 {
261 return phys_page_find_alloc(index, 0);
262 }
263
264 #if !defined(CONFIG_USER_ONLY)
265 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
266 target_ulong vaddr);
267 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
268 target_ulong vaddr);
269
270 static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
271 {
272 #if TARGET_LONG_BITS > 32
273 void **p, **lp;
274
275 p = l1_virt_map;
276 lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
277 p = *lp;
278 if (!p) {
279 if (!alloc)
280 return NULL;
281 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
282 *lp = p;
283 }
284 lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
285 p = *lp;
286 if (!p) {
287 if (!alloc)
288 return NULL;
289 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
290 *lp = p;
291 }
292 lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
293 p = *lp;
294 if (!p) {
295 if (!alloc)
296 return NULL;
297 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
298 *lp = p;
299 }
300 lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
301 p = *lp;
302 if (!p) {
303 if (!alloc)
304 return NULL;
305 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
306 *lp = p;
307 }
308 lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
309 p = *lp;
310 if (!p) {
311 if (!alloc)
312 return NULL;
313 p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
314 *lp = p;
315 }
316 return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
317 #else
318 VirtPageDesc *p, **lp;
319
320 lp = &l1_virt_map[index >> L2_BITS];
321 p = *lp;
322 if (!p) {
323 /* allocate if not found */
324 if (!alloc)
325 return NULL;
326 p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
327 *lp = p;
328 }
329 return p + (index & (L2_SIZE - 1));
330 #endif
331 }
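/* For targets with TARGET_LONG_BITS > 32, the virtual page table above is a
   six level radix tree of VIRT_L_BITS (9) bits per level; otherwise the same
   two level L1/L2 scheme as l1_map is used. */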
332
333 static inline VirtPageDesc *virt_page_find(target_ulong index)
334 {
335 return virt_page_find_alloc(index, 0);
336 }
337
338 #if TARGET_LONG_BITS > 32
339 static void virt_page_flush_internal(void **p, int level)
340 {
341 int i;
342 if (level == 0) {
343 VirtPageDesc *q = (VirtPageDesc *)p;
344 for(i = 0; i < VIRT_L_SIZE; i++)
345 q[i].valid_tag = 0;
346 } else {
347 level--;
348 for(i = 0; i < VIRT_L_SIZE; i++) {
349 if (p[i])
350 virt_page_flush_internal(p[i], level);
351 }
352 }
353 }
354 #endif
355
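/* The virtual page table is flushed lazily: instead of clearing every entry we
   increment 'virt_valid_tag', and entries whose tag no longer matches are
   treated as invalid. Only when the tag wraps around to zero is the whole
   table really cleared. */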
356 static void virt_page_flush(void)
357 {
358 virt_valid_tag++;
359
360 if (virt_valid_tag == 0) {
361 virt_valid_tag = 1;
362 #if TARGET_LONG_BITS > 32
363 virt_page_flush_internal(l1_virt_map, 5);
364 #else
365 {
366 int i, j;
367 VirtPageDesc *p;
368 for(i = 0; i < L1_SIZE; i++) {
369 p = l1_virt_map[i];
370 if (p) {
371 for(j = 0; j < L2_SIZE; j++)
372 p[j].valid_tag = 0;
373 }
374 }
375 }
376 #endif
377 }
378 }
379 #else
380 static void virt_page_flush(void)
381 {
382 }
383 #endif
384
385 void cpu_exec_init(void)
386 {
387 if (!code_gen_ptr) {
388 code_gen_ptr = code_gen_buffer;
389 page_init();
390 io_mem_init();
391 }
392 }
393
394 static inline void invalidate_page_bitmap(PageDesc *p)
395 {
396 if (p->code_bitmap) {
397 qemu_free(p->code_bitmap);
398 p->code_bitmap = NULL;
399 }
400 p->code_write_count = 0;
401 }
402
403 /* set to NULL all the 'first_tb' fields in all PageDescs */
404 static void page_flush_tb(void)
405 {
406 int i, j;
407 PageDesc *p;
408
409 for(i = 0; i < L1_SIZE; i++) {
410 p = l1_map[i];
411 if (p) {
412 for(j = 0; j < L2_SIZE; j++) {
413 p->first_tb = NULL;
414 invalidate_page_bitmap(p);
415 p++;
416 }
417 }
418 }
419 }
420
421 /* flush all the translation blocks */
422 /* XXX: tb_flush is currently not thread safe */
423 void tb_flush(CPUState *env)
424 {
425 #if defined(DEBUG_FLUSH)
426 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
427 code_gen_ptr - code_gen_buffer,
428 nb_tbs,
429 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
430 #endif
431 nb_tbs = 0;
432 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
433 virt_page_flush();
434
435 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
436 page_flush_tb();
437
438 code_gen_ptr = code_gen_buffer;
439 /* XXX: flush processor icache at this point if cache flush is
440 expensive */
441 tb_flush_count++;
442 }
443
444 #ifdef DEBUG_TB_CHECK
445
446 static void tb_invalidate_check(unsigned long address)
447 {
448 TranslationBlock *tb;
449 int i;
450 address &= TARGET_PAGE_MASK;
451 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
452 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
453 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
454 address >= tb->pc + tb->size)) {
455 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
456 address, tb->pc, tb->size);
457 }
458 }
459 }
460 }
461
462 /* verify that all the pages have correct rights for code */
463 static void tb_page_check(void)
464 {
465 TranslationBlock *tb;
466 int i, flags1, flags2;
467
468 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
469 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
470 flags1 = page_get_flags(tb->pc);
471 flags2 = page_get_flags(tb->pc + tb->size - 1);
472 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
473 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
474 tb->pc, tb->size, flags1, flags2);
475 }
476 }
477 }
478 }
479
480 void tb_jmp_check(TranslationBlock *tb)
481 {
482 TranslationBlock *tb1;
483 unsigned int n1;
484
485     /* walk the circular list of jumps to this TB */
486 tb1 = tb->jmp_first;
487 for(;;) {
488 n1 = (long)tb1 & 3;
489 tb1 = (TranslationBlock *)((long)tb1 & ~3);
490 if (n1 == 2)
491 break;
492 tb1 = tb1->jmp_next[n1];
493 }
494 /* check end of list */
495 if (tb1 != tb) {
496 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
497 }
498 }
499
500 #endif
501
502 /* invalidate one TB */
503 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
504 int next_offset)
505 {
506 TranslationBlock *tb1;
507 for(;;) {
508 tb1 = *ptb;
509 if (tb1 == tb) {
510 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
511 break;
512 }
513 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
514 }
515 }
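/* The TB lists handled below store a small tag in the two low bits of each
   TranslationBlock pointer: 0 or 1 selects which page_next[]/jmp_next[] slot
   the link came from, and the value 2 marks the head/end of the circular jump
   list (see jmp_first). The real pointer is recovered with '& ~3'. */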
516
517 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
518 {
519 TranslationBlock *tb1;
520 unsigned int n1;
521
522 for(;;) {
523 tb1 = *ptb;
524 n1 = (long)tb1 & 3;
525 tb1 = (TranslationBlock *)((long)tb1 & ~3);
526 if (tb1 == tb) {
527 *ptb = tb1->page_next[n1];
528 break;
529 }
530 ptb = &tb1->page_next[n1];
531 }
532 }
533
534 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
535 {
536 TranslationBlock *tb1, **ptb;
537 unsigned int n1;
538
539 ptb = &tb->jmp_next[n];
540 tb1 = *ptb;
541 if (tb1) {
542 /* find tb(n) in circular list */
543 for(;;) {
544 tb1 = *ptb;
545 n1 = (long)tb1 & 3;
546 tb1 = (TranslationBlock *)((long)tb1 & ~3);
547 if (n1 == n && tb1 == tb)
548 break;
549 if (n1 == 2) {
550 ptb = &tb1->jmp_first;
551 } else {
552 ptb = &tb1->jmp_next[n1];
553 }
554 }
555 /* now we can suppress tb(n) from the list */
556 *ptb = tb->jmp_next[n];
557
558 tb->jmp_next[n] = NULL;
559 }
560 }
561
562 /* reset the jump entry 'n' of a TB so that it is not chained to
563 another TB */
564 static inline void tb_reset_jump(TranslationBlock *tb, int n)
565 {
566 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
567 }
568
569 static inline void tb_invalidate(TranslationBlock *tb)
570 {
571 unsigned int h, n1;
572 TranslationBlock *tb1, *tb2, **ptb;
573
574 tb_invalidated_flag = 1;
575
576 /* remove the TB from the hash list */
577 h = tb_hash_func(tb->pc);
578 ptb = &tb_hash[h];
579 for(;;) {
580 tb1 = *ptb;
581         /* NOTE: the TB is not necessarily linked in the hash; if it is
582            not, it simply means it is not currently in use */
583 if (tb1 == NULL)
584 return;
585 if (tb1 == tb) {
586 *ptb = tb1->hash_next;
587 break;
588 }
589 ptb = &tb1->hash_next;
590 }
591
592 /* suppress this TB from the two jump lists */
593 tb_jmp_remove(tb, 0);
594 tb_jmp_remove(tb, 1);
595
596 /* suppress any remaining jumps to this TB */
597 tb1 = tb->jmp_first;
598 for(;;) {
599 n1 = (long)tb1 & 3;
600 if (n1 == 2)
601 break;
602 tb1 = (TranslationBlock *)((long)tb1 & ~3);
603 tb2 = tb1->jmp_next[n1];
604 tb_reset_jump(tb1, n1);
605 tb1->jmp_next[n1] = NULL;
606 tb1 = tb2;
607 }
608 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
609 }
610
611 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
612 {
613 PageDesc *p;
614 unsigned int h;
615 target_ulong phys_pc;
616
617 /* remove the TB from the hash list */
618 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
619 h = tb_phys_hash_func(phys_pc);
620 tb_remove(&tb_phys_hash[h], tb,
621 offsetof(TranslationBlock, phys_hash_next));
622
623 /* remove the TB from the page list */
624 if (tb->page_addr[0] != page_addr) {
625 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
626 tb_page_remove(&p->first_tb, tb);
627 invalidate_page_bitmap(p);
628 }
629 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
630 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
631 tb_page_remove(&p->first_tb, tb);
632 invalidate_page_bitmap(p);
633 }
634
635 tb_invalidate(tb);
636 tb_phys_invalidate_count++;
637 }
638
639 static inline void set_bits(uint8_t *tab, int start, int len)
640 {
641 int end, mask, end1;
642
643 end = start + len;
644 tab += start >> 3;
645 mask = 0xff << (start & 7);
646 if ((start & ~7) == (end & ~7)) {
647 if (start < end) {
648 mask &= ~(0xff << (end & 7));
649 *tab |= mask;
650 }
651 } else {
652 *tab++ |= mask;
653 start = (start + 8) & ~7;
654 end1 = end & ~7;
655 while (start < end1) {
656 *tab++ = 0xff;
657 start += 8;
658 }
659 if (start < end) {
660 mask = ~(0xff << (end & 7));
661 *tab |= mask;
662 }
663 }
664 }
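/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it does
   tab[0] |= 0xf8 and tab[1] |= 0x03. */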
665
666 static void build_page_bitmap(PageDesc *p)
667 {
668 int n, tb_start, tb_end;
669 TranslationBlock *tb;
670
671 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
672 if (!p->code_bitmap)
673 return;
674 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
675
676 tb = p->first_tb;
677 while (tb != NULL) {
678 n = (long)tb & 3;
679 tb = (TranslationBlock *)((long)tb & ~3);
680 /* NOTE: this is subtle as a TB may span two physical pages */
681 if (n == 0) {
682 /* NOTE: tb_end may be after the end of the page, but
683 it is not a problem */
684 tb_start = tb->pc & ~TARGET_PAGE_MASK;
685 tb_end = tb_start + tb->size;
686 if (tb_end > TARGET_PAGE_SIZE)
687 tb_end = TARGET_PAGE_SIZE;
688 } else {
689 tb_start = 0;
690 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
691 }
692 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
693 tb = tb->page_next[n];
694 }
695 }
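/* code_bitmap has one bit per byte of the page that is covered by translated
   code. tb_invalidate_phys_page_fast() consults it so that writes which do not
   overlap translated code can skip the invalidation pass. */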
696
697 #ifdef TARGET_HAS_PRECISE_SMC
698
699 static void tb_gen_code(CPUState *env,
700 target_ulong pc, target_ulong cs_base, int flags,
701 int cflags)
702 {
703 TranslationBlock *tb;
704 uint8_t *tc_ptr;
705 target_ulong phys_pc, phys_page2, virt_page2;
706 int code_gen_size;
707
708 phys_pc = get_phys_addr_code(env, pc);
709 tb = tb_alloc(pc);
710 if (!tb) {
711 /* flush must be done */
712 tb_flush(env);
713 /* cannot fail at this point */
714 tb = tb_alloc(pc);
715 }
716 tc_ptr = code_gen_ptr;
717 tb->tc_ptr = tc_ptr;
718 tb->cs_base = cs_base;
719 tb->flags = flags;
720 tb->cflags = cflags;
721 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
722 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
723
724 /* check next page if needed */
725 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
726 phys_page2 = -1;
727 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
728 phys_page2 = get_phys_addr_code(env, virt_page2);
729 }
730 tb_link_phys(tb, phys_pc, phys_page2);
731 }
732 #endif
733
734 /* invalidate all TBs which intersect with the target physical page
735    starting in range [start, end). NOTE: start and end must refer to
736 the same physical page. 'is_cpu_write_access' should be true if called
737 from a real cpu write access: the virtual CPU will exit the current
738 TB if code is modified inside this TB. */
739 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
740 int is_cpu_write_access)
741 {
742 int n, current_tb_modified, current_tb_not_found, current_flags;
743 CPUState *env = cpu_single_env;
744 PageDesc *p;
745 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
746 target_ulong tb_start, tb_end;
747 target_ulong current_pc, current_cs_base;
748
749 p = page_find(start >> TARGET_PAGE_BITS);
750 if (!p)
751 return;
752 if (!p->code_bitmap &&
753 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
754 is_cpu_write_access) {
755 /* build code bitmap */
756 build_page_bitmap(p);
757 }
758
759     /* we remove all the TBs in the range [start, end) */
760 /* XXX: see if in some cases it could be faster to invalidate all the code */
761 current_tb_not_found = is_cpu_write_access;
762 current_tb_modified = 0;
763 current_tb = NULL; /* avoid warning */
764 current_pc = 0; /* avoid warning */
765 current_cs_base = 0; /* avoid warning */
766 current_flags = 0; /* avoid warning */
767 tb = p->first_tb;
768 while (tb != NULL) {
769 n = (long)tb & 3;
770 tb = (TranslationBlock *)((long)tb & ~3);
771 tb_next = tb->page_next[n];
772 /* NOTE: this is subtle as a TB may span two physical pages */
773 if (n == 0) {
774 /* NOTE: tb_end may be after the end of the page, but
775 it is not a problem */
776 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
777 tb_end = tb_start + tb->size;
778 } else {
779 tb_start = tb->page_addr[1];
780 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
781 }
782 if (!(tb_end <= start || tb_start >= end)) {
783 #ifdef TARGET_HAS_PRECISE_SMC
784 if (current_tb_not_found) {
785 current_tb_not_found = 0;
786 current_tb = NULL;
787 if (env->mem_write_pc) {
788 /* now we have a real cpu fault */
789 current_tb = tb_find_pc(env->mem_write_pc);
790 }
791 }
792 if (current_tb == tb &&
793 !(current_tb->cflags & CF_SINGLE_INSN)) {
794 /* If we are modifying the current TB, we must stop
795 its execution. We could be more precise by checking
796 that the modification is after the current PC, but it
797 would require a specialized function to partially
798 restore the CPU state */
799
800 current_tb_modified = 1;
801 cpu_restore_state(current_tb, env,
802 env->mem_write_pc, NULL);
803 #if defined(TARGET_I386)
804 current_flags = env->hflags;
805 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
806 current_cs_base = (target_ulong)env->segs[R_CS].base;
807 current_pc = current_cs_base + env->eip;
808 #else
809 #error unsupported CPU
810 #endif
811 }
812 #endif /* TARGET_HAS_PRECISE_SMC */
813 saved_tb = env->current_tb;
814 env->current_tb = NULL;
815 tb_phys_invalidate(tb, -1);
816 env->current_tb = saved_tb;
817 if (env->interrupt_request && env->current_tb)
818 cpu_interrupt(env, env->interrupt_request);
819 }
820 tb = tb_next;
821 }
822 #if !defined(CONFIG_USER_ONLY)
823 /* if no code remaining, no need to continue to use slow writes */
824 if (!p->first_tb) {
825 invalidate_page_bitmap(p);
826 if (is_cpu_write_access) {
827 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
828 }
829 }
830 #endif
831 #ifdef TARGET_HAS_PRECISE_SMC
832 if (current_tb_modified) {
833 /* we generate a block containing just the instruction
834 modifying the memory. It will ensure that it cannot modify
835 itself */
836 env->current_tb = NULL;
837 tb_gen_code(env, current_pc, current_cs_base, current_flags,
838 CF_SINGLE_INSN);
839 cpu_resume_from_signal(env, NULL);
840 }
841 #endif
842 }
843
844 /* len must be <= 8 and start must be a multiple of len */
845 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
846 {
847 PageDesc *p;
848 int offset, b;
849 #if 0
850 if (1) {
851 if (loglevel) {
852 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
853 cpu_single_env->mem_write_vaddr, len,
854 cpu_single_env->eip,
855 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
856 }
857 }
858 #endif
859 p = page_find(start >> TARGET_PAGE_BITS);
860 if (!p)
861 return;
862 if (p->code_bitmap) {
863 offset = start & ~TARGET_PAGE_MASK;
864 b = p->code_bitmap[offset >> 3] >> (offset & 7);
865 if (b & ((1 << len) - 1))
866 goto do_invalidate;
867 } else {
868 do_invalidate:
869 tb_invalidate_phys_page_range(start, start + len, 1);
870 }
871 }
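/* Because len <= 8 and start is aligned on len, the bits tested above always
   lie within a single byte of the code bitmap, so one byte lookup is enough to
   decide whether the write overlaps translated code. */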
872
873 #if !defined(CONFIG_SOFTMMU)
874 static void tb_invalidate_phys_page(target_ulong addr,
875 unsigned long pc, void *puc)
876 {
877 int n, current_flags, current_tb_modified;
878 target_ulong current_pc, current_cs_base;
879 PageDesc *p;
880 TranslationBlock *tb, *current_tb;
881 #ifdef TARGET_HAS_PRECISE_SMC
882 CPUState *env = cpu_single_env;
883 #endif
884
885 addr &= TARGET_PAGE_MASK;
886 p = page_find(addr >> TARGET_PAGE_BITS);
887 if (!p)
888 return;
889 tb = p->first_tb;
890 current_tb_modified = 0;
891 current_tb = NULL;
892 current_pc = 0; /* avoid warning */
893 current_cs_base = 0; /* avoid warning */
894 current_flags = 0; /* avoid warning */
895 #ifdef TARGET_HAS_PRECISE_SMC
896 if (tb && pc != 0) {
897 current_tb = tb_find_pc(pc);
898 }
899 #endif
900 while (tb != NULL) {
901 n = (long)tb & 3;
902 tb = (TranslationBlock *)((long)tb & ~3);
903 #ifdef TARGET_HAS_PRECISE_SMC
904 if (current_tb == tb &&
905 !(current_tb->cflags & CF_SINGLE_INSN)) {
906 /* If we are modifying the current TB, we must stop
907 its execution. We could be more precise by checking
908 that the modification is after the current PC, but it
909 would require a specialized function to partially
910 restore the CPU state */
911
912 current_tb_modified = 1;
913 cpu_restore_state(current_tb, env, pc, puc);
914 #if defined(TARGET_I386)
915 current_flags = env->hflags;
916 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
917 current_cs_base = (target_ulong)env->segs[R_CS].base;
918 current_pc = current_cs_base + env->eip;
919 #else
920 #error unsupported CPU
921 #endif
922 }
923 #endif /* TARGET_HAS_PRECISE_SMC */
924 tb_phys_invalidate(tb, addr);
925 tb = tb->page_next[n];
926 }
927 p->first_tb = NULL;
928 #ifdef TARGET_HAS_PRECISE_SMC
929 if (current_tb_modified) {
930 /* we generate a block containing just the instruction
931 modifying the memory. It will ensure that it cannot modify
932 itself */
933 env->current_tb = NULL;
934 tb_gen_code(env, current_pc, current_cs_base, current_flags,
935 CF_SINGLE_INSN);
936 cpu_resume_from_signal(env, puc);
937 }
938 #endif
939 }
940 #endif
941
942 /* add the tb in the target page and protect it if necessary */
943 static inline void tb_alloc_page(TranslationBlock *tb,
944 unsigned int n, unsigned int page_addr)
945 {
946 PageDesc *p;
947 TranslationBlock *last_first_tb;
948
949 tb->page_addr[n] = page_addr;
950 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
951 tb->page_next[n] = p->first_tb;
952 last_first_tb = p->first_tb;
953 p->first_tb = (TranslationBlock *)((long)tb | n);
954 invalidate_page_bitmap(p);
955
956 #if defined(TARGET_HAS_SMC) || 1
957
958 #if defined(CONFIG_USER_ONLY)
959 if (p->flags & PAGE_WRITE) {
960 unsigned long host_start, host_end, addr;
961 int prot;
962
963         /* force the host page to be non writable (writes will trigger a
964            page fault + mprotect overhead) */
965 host_start = page_addr & qemu_host_page_mask;
966 host_end = host_start + qemu_host_page_size;
967 prot = 0;
968 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
969 prot |= page_get_flags(addr);
970 mprotect((void *)host_start, qemu_host_page_size,
971 (prot & PAGE_BITS) & ~PAGE_WRITE);
972 #ifdef DEBUG_TB_INVALIDATE
973 printf("protecting code page: 0x%08lx\n",
974 host_start);
975 #endif
976 p->flags &= ~PAGE_WRITE;
977 }
978 #else
979 /* if some code is already present, then the pages are already
980 protected. So we handle the case where only the first TB is
981 allocated in a physical page */
982 if (!last_first_tb) {
983 target_ulong virt_addr;
984
985 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
986 tlb_protect_code(cpu_single_env, page_addr, virt_addr);
987 }
988 #endif
989
990 #endif /* TARGET_HAS_SMC */
991 }
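/* Two write protection strategies are used here: in user mode the host page is
   mprotect()ed read only so that guest writes fault and reach page_unprotect();
   in system mode tlb_protect_code() clears CODE_DIRTY_FLAG and redirects the
   write TLB entries to IO_MEM_NOTDIRTY so that writes go through the slow
   notdirty_mem_write* handlers. */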
992
993 /* Allocate a new translation block. Return NULL if there are too many
994    translation blocks or too much generated code (the caller must then flush). */
995 TranslationBlock *tb_alloc(target_ulong pc)
996 {
997 TranslationBlock *tb;
998
999 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
1000 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
1001 return NULL;
1002 tb = &tbs[nb_tbs++];
1003 tb->pc = pc;
1004 tb->cflags = 0;
1005 return tb;
1006 }
1007
1008 /* add a new TB and link it to the physical page tables. phys_page2 is
1009 (-1) to indicate that only one page contains the TB. */
1010 void tb_link_phys(TranslationBlock *tb,
1011 target_ulong phys_pc, target_ulong phys_page2)
1012 {
1013 unsigned int h;
1014 TranslationBlock **ptb;
1015
1016 /* add in the physical hash table */
1017 h = tb_phys_hash_func(phys_pc);
1018 ptb = &tb_phys_hash[h];
1019 tb->phys_hash_next = *ptb;
1020 *ptb = tb;
1021
1022 /* add in the page list */
1023 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1024 if (phys_page2 != -1)
1025 tb_alloc_page(tb, 1, phys_page2);
1026 else
1027 tb->page_addr[1] = -1;
1028 #ifdef DEBUG_TB_CHECK
1029 tb_page_check();
1030 #endif
1031 }
1032
1033 /* link the tb with the other TBs */
1034 void tb_link(TranslationBlock *tb)
1035 {
1036 #if !defined(CONFIG_USER_ONLY)
1037 {
1038 VirtPageDesc *vp;
1039 target_ulong addr;
1040
1041 /* save the code memory mappings (needed to invalidate the code) */
1042 addr = tb->pc & TARGET_PAGE_MASK;
1043 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1044 #ifdef DEBUG_TLB_CHECK
1045 if (vp->valid_tag == virt_valid_tag &&
1046 vp->phys_addr != tb->page_addr[0]) {
1047 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1048 addr, tb->page_addr[0], vp->phys_addr);
1049 }
1050 #endif
1051 vp->phys_addr = tb->page_addr[0];
1052 if (vp->valid_tag != virt_valid_tag) {
1053 vp->valid_tag = virt_valid_tag;
1054 #if !defined(CONFIG_SOFTMMU)
1055 vp->prot = 0;
1056 #endif
1057 }
1058
1059 if (tb->page_addr[1] != -1) {
1060 addr += TARGET_PAGE_SIZE;
1061 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1062 #ifdef DEBUG_TLB_CHECK
1063 if (vp->valid_tag == virt_valid_tag &&
1064 vp->phys_addr != tb->page_addr[1]) {
1065 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1066 addr, tb->page_addr[1], vp->phys_addr);
1067 }
1068 #endif
1069 vp->phys_addr = tb->page_addr[1];
1070 if (vp->valid_tag != virt_valid_tag) {
1071 vp->valid_tag = virt_valid_tag;
1072 #if !defined(CONFIG_SOFTMMU)
1073 vp->prot = 0;
1074 #endif
1075 }
1076 }
1077 }
1078 #endif
1079
1080 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1081 tb->jmp_next[0] = NULL;
1082 tb->jmp_next[1] = NULL;
1083 #ifdef USE_CODE_COPY
1084 tb->cflags &= ~CF_FP_USED;
1085 if (tb->cflags & CF_TB_FP_USED)
1086 tb->cflags |= CF_FP_USED;
1087 #endif
1088
1089 /* init original jump addresses */
1090 if (tb->tb_next_offset[0] != 0xffff)
1091 tb_reset_jump(tb, 0);
1092 if (tb->tb_next_offset[1] != 0xffff)
1093 tb_reset_jump(tb, 1);
1094 }
1095
1096 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1097 tb[1].tc_ptr. Return NULL if not found */
1098 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1099 {
1100 int m_min, m_max, m;
1101 unsigned long v;
1102 TranslationBlock *tb;
1103
1104 if (nb_tbs <= 0)
1105 return NULL;
1106 if (tc_ptr < (unsigned long)code_gen_buffer ||
1107 tc_ptr >= (unsigned long)code_gen_ptr)
1108 return NULL;
1109 /* binary search (cf Knuth) */
1110 m_min = 0;
1111 m_max = nb_tbs - 1;
1112 while (m_min <= m_max) {
1113 m = (m_min + m_max) >> 1;
1114 tb = &tbs[m];
1115 v = (unsigned long)tb->tc_ptr;
1116 if (v == tc_ptr)
1117 return tb;
1118 else if (tc_ptr < v) {
1119 m_max = m - 1;
1120 } else {
1121 m_min = m + 1;
1122 }
1123 }
1124 return &tbs[m_max];
1125 }
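/* If no TB starts exactly at tc_ptr, the TB with the greatest tc_ptr below it
   is returned; as TBs are allocated contiguously from code_gen_buffer, that is
   the block containing tc_ptr. */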
1126
1127 static void tb_reset_jump_recursive(TranslationBlock *tb);
1128
1129 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1130 {
1131 TranslationBlock *tb1, *tb_next, **ptb;
1132 unsigned int n1;
1133
1134 tb1 = tb->jmp_next[n];
1135 if (tb1 != NULL) {
1136 /* find head of list */
1137 for(;;) {
1138 n1 = (long)tb1 & 3;
1139 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1140 if (n1 == 2)
1141 break;
1142 tb1 = tb1->jmp_next[n1];
1143 }
1144         /* we are now sure that tb jumps to tb1 */
1145 tb_next = tb1;
1146
1147 /* remove tb from the jmp_first list */
1148 ptb = &tb_next->jmp_first;
1149 for(;;) {
1150 tb1 = *ptb;
1151 n1 = (long)tb1 & 3;
1152 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1153 if (n1 == n && tb1 == tb)
1154 break;
1155 ptb = &tb1->jmp_next[n1];
1156 }
1157 *ptb = tb->jmp_next[n];
1158 tb->jmp_next[n] = NULL;
1159
1160 /* suppress the jump to next tb in generated code */
1161 tb_reset_jump(tb, n);
1162
1163         /* suppress jumps in the tb to which we could have jumped */
1164 tb_reset_jump_recursive(tb_next);
1165 }
1166 }
1167
1168 static void tb_reset_jump_recursive(TranslationBlock *tb)
1169 {
1170 tb_reset_jump_recursive2(tb, 0);
1171 tb_reset_jump_recursive2(tb, 1);
1172 }
1173
1174 #if defined(TARGET_HAS_ICE)
1175 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1176 {
1177 target_ulong phys_addr;
1178
1179 phys_addr = cpu_get_phys_page_debug(env, pc);
1180 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1181 }
1182 #endif
1183
1184 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1185 breakpoint is reached */
1186 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1187 {
1188 #if defined(TARGET_HAS_ICE)
1189 int i;
1190
1191 for(i = 0; i < env->nb_breakpoints; i++) {
1192 if (env->breakpoints[i] == pc)
1193 return 0;
1194 }
1195
1196 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1197 return -1;
1198 env->breakpoints[env->nb_breakpoints++] = pc;
1199
1200 breakpoint_invalidate(env, pc);
1201 return 0;
1202 #else
1203 return -1;
1204 #endif
1205 }
1206
1207 /* remove a breakpoint */
1208 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1209 {
1210 #if defined(TARGET_HAS_ICE)
1211 int i;
1212 for(i = 0; i < env->nb_breakpoints; i++) {
1213 if (env->breakpoints[i] == pc)
1214 goto found;
1215 }
1216 return -1;
1217 found:
1218 env->nb_breakpoints--;
1219 if (i < env->nb_breakpoints)
1220 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1221
1222 breakpoint_invalidate(env, pc);
1223 return 0;
1224 #else
1225 return -1;
1226 #endif
1227 }
1228
1229 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1230 CPU loop after each instruction */
1231 void cpu_single_step(CPUState *env, int enabled)
1232 {
1233 #if defined(TARGET_HAS_ICE)
1234 if (env->singlestep_enabled != enabled) {
1235 env->singlestep_enabled = enabled;
1236         /* must flush all the translated code to avoid inconsistencies */
1237 /* XXX: only flush what is necessary */
1238 tb_flush(env);
1239 }
1240 #endif
1241 }
1242
1243 /* enable or disable low level logging */
1244 void cpu_set_log(int log_flags)
1245 {
1246 loglevel = log_flags;
1247 if (loglevel && !logfile) {
1248 logfile = fopen(logfilename, "w");
1249 if (!logfile) {
1250 perror(logfilename);
1251 _exit(1);
1252 }
1253 #if !defined(CONFIG_SOFTMMU)
1254         /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1255 {
1256 static uint8_t logfile_buf[4096];
1257 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1258 }
1259 #else
1260 setvbuf(logfile, NULL, _IOLBF, 0);
1261 #endif
1262 }
1263 }
1264
1265 void cpu_set_log_filename(const char *filename)
1266 {
1267 logfilename = strdup(filename);
1268 }
1269
1270 /* mask must never be zero, except for A20 change call */
1271 void cpu_interrupt(CPUState *env, int mask)
1272 {
1273 TranslationBlock *tb;
1274 static int interrupt_lock;
1275
1276 env->interrupt_request |= mask;
1277 /* if the cpu is currently executing code, we must unlink it and
1278        all the potentially executing TBs */
1279 tb = env->current_tb;
1280 if (tb && !testandset(&interrupt_lock)) {
1281 env->current_tb = NULL;
1282 tb_reset_jump_recursive(tb);
1283 interrupt_lock = 0;
1284 }
1285 }
1286
1287 void cpu_reset_interrupt(CPUState *env, int mask)
1288 {
1289 env->interrupt_request &= ~mask;
1290 }
1291
1292 CPULogItem cpu_log_items[] = {
1293 { CPU_LOG_TB_OUT_ASM, "out_asm",
1294 "show generated host assembly code for each compiled TB" },
1295 { CPU_LOG_TB_IN_ASM, "in_asm",
1296 "show target assembly code for each compiled TB" },
1297 { CPU_LOG_TB_OP, "op",
1298 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1299 #ifdef TARGET_I386
1300 { CPU_LOG_TB_OP_OPT, "op_opt",
1301 "show micro ops after optimization for each compiled TB" },
1302 #endif
1303 { CPU_LOG_INT, "int",
1304 "show interrupts/exceptions in short format" },
1305 { CPU_LOG_EXEC, "exec",
1306 "show trace before each executed TB (lots of logs)" },
1307 { CPU_LOG_TB_CPU, "cpu",
1308 "show CPU state before bloc translation" },
1309 #ifdef TARGET_I386
1310 { CPU_LOG_PCALL, "pcall",
1311 "show protected mode far calls/returns/exceptions" },
1312 #endif
1313 #ifdef DEBUG_IOPORT
1314 { CPU_LOG_IOPORT, "ioport",
1315 "show all i/o ports accesses" },
1316 #endif
1317 { 0, NULL, NULL },
1318 };
1319
1320 static int cmp1(const char *s1, int n, const char *s2)
1321 {
1322 if (strlen(s2) != n)
1323 return 0;
1324 return memcmp(s1, s2, n) == 0;
1325 }
1326
1327 /* takes a comma separated list of log masks. Returns 0 on error. */
1328 int cpu_str_to_log_mask(const char *str)
1329 {
1330 CPULogItem *item;
1331 int mask;
1332 const char *p, *p1;
1333
1334 p = str;
1335 mask = 0;
1336 for(;;) {
1337 p1 = strchr(p, ',');
1338 if (!p1)
1339 p1 = p + strlen(p);
1340 if(cmp1(p,p1-p,"all")) {
1341 for(item = cpu_log_items; item->mask != 0; item++) {
1342 mask |= item->mask;
1343 }
1344 } else {
1345 for(item = cpu_log_items; item->mask != 0; item++) {
1346 if (cmp1(p, p1 - p, item->name))
1347 goto found;
1348 }
1349 return 0;
1350 }
1351 found:
1352 mask |= item->mask;
1353 if (*p1 != ',')
1354 break;
1355 p = p1 + 1;
1356 }
1357 return mask;
1358 }
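/* e.g. cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, and "all" enables every entry of
   cpu_log_items[]. */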
1359
1360 void cpu_abort(CPUState *env, const char *fmt, ...)
1361 {
1362 va_list ap;
1363
1364 va_start(ap, fmt);
1365 fprintf(stderr, "qemu: fatal: ");
1366 vfprintf(stderr, fmt, ap);
1367 fprintf(stderr, "\n");
1368 #ifdef TARGET_I386
1369 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1370 #else
1371 cpu_dump_state(env, stderr, fprintf, 0);
1372 #endif
1373 va_end(ap);
1374 abort();
1375 }
1376
1377 #if !defined(CONFIG_USER_ONLY)
1378
1379 /* NOTE: if flush_global is true, also flush global entries (not
1380 implemented yet) */
1381 void tlb_flush(CPUState *env, int flush_global)
1382 {
1383 int i;
1384
1385 #if defined(DEBUG_TLB)
1386 printf("tlb_flush:\n");
1387 #endif
1388 /* must reset current TB so that interrupts cannot modify the
1389 links while we are modifying them */
1390 env->current_tb = NULL;
1391
1392 for(i = 0; i < CPU_TLB_SIZE; i++) {
1393 env->tlb_read[0][i].address = -1;
1394 env->tlb_write[0][i].address = -1;
1395 env->tlb_read[1][i].address = -1;
1396 env->tlb_write[1][i].address = -1;
1397 }
1398
1399 virt_page_flush();
1400 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1401
1402 #if !defined(CONFIG_SOFTMMU)
1403 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1404 #endif
1405 #ifdef USE_KQEMU
1406 if (env->kqemu_enabled) {
1407 kqemu_flush(env, flush_global);
1408 }
1409 #endif
1410 tlb_flush_count++;
1411 }
1412
1413 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1414 {
1415 if (addr == (tlb_entry->address &
1416 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1417 tlb_entry->address = -1;
1418 }
1419
1420 void tlb_flush_page(CPUState *env, target_ulong addr)
1421 {
1422 int i, n;
1423 VirtPageDesc *vp;
1424 PageDesc *p;
1425 TranslationBlock *tb;
1426
1427 #if defined(DEBUG_TLB)
1428 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1429 #endif
1430 /* must reset current TB so that interrupts cannot modify the
1431 links while we are modifying them */
1432 env->current_tb = NULL;
1433
1434 addr &= TARGET_PAGE_MASK;
1435 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1436 tlb_flush_entry(&env->tlb_read[0][i], addr);
1437 tlb_flush_entry(&env->tlb_write[0][i], addr);
1438 tlb_flush_entry(&env->tlb_read[1][i], addr);
1439 tlb_flush_entry(&env->tlb_write[1][i], addr);
1440
1441     /* remove from the virtual pc hash table all the TBs at this
1442 virtual address */
1443
1444 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1445 if (vp && vp->valid_tag == virt_valid_tag) {
1446 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1447 if (p) {
1448 /* we remove all the links to the TBs in this virtual page */
1449 tb = p->first_tb;
1450 while (tb != NULL) {
1451 n = (long)tb & 3;
1452 tb = (TranslationBlock *)((long)tb & ~3);
1453 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1454 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1455 tb_invalidate(tb);
1456 }
1457 tb = tb->page_next[n];
1458 }
1459 }
1460 vp->valid_tag = 0;
1461 }
1462
1463 #if !defined(CONFIG_SOFTMMU)
1464 if (addr < MMAP_AREA_END)
1465 munmap((void *)addr, TARGET_PAGE_SIZE);
1466 #endif
1467 #ifdef USE_KQEMU
1468 if (env->kqemu_enabled) {
1469 kqemu_flush_page(env, addr);
1470 }
1471 #endif
1472 }
1473
1474 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1475 {
1476 if (addr == (tlb_entry->address &
1477 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1478 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1479 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1480 }
1481 }
1482
1483 /* update the TLBs so that writes to code in the virtual page 'addr'
1484 can be detected */
1485 static void tlb_protect_code(CPUState *env, ram_addr_t ram_addr,
1486 target_ulong vaddr)
1487 {
1488 int i;
1489
1490 vaddr &= TARGET_PAGE_MASK;
1491 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1492 tlb_protect_code1(&env->tlb_write[0][i], vaddr);
1493 tlb_protect_code1(&env->tlb_write[1][i], vaddr);
1494
1495 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] &= ~CODE_DIRTY_FLAG;
1496 #ifdef USE_KQEMU
1497 if (env->kqemu_enabled) {
1498 kqemu_set_notdirty(env, ram_addr);
1499 }
1500 #endif
1501
1502 #if !defined(CONFIG_SOFTMMU)
1503 /* NOTE: as we generated the code for this page, it is already at
1504 least readable */
1505 if (vaddr < MMAP_AREA_END)
1506 mprotect((void *)vaddr, TARGET_PAGE_SIZE, PROT_READ);
1507 #endif
1508 }
1509
1510 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1511 tested for self modifying code */
1512 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1513 target_ulong vaddr)
1514 {
1515 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1516 }
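/* Together these two helpers implement self modifying code detection in the
   softmmu case: tlb_protect_code() clears CODE_DIRTY_FLAG and switches the
   write TLB entries of the page to IO_MEM_NOTDIRTY, so every guest write goes
   through the slow notdirty_mem_write* handlers below, which invalidate the
   overlapping TBs; once no translated code remains in the page,
   tlb_unprotect_code_phys() sets it dirty again and fast writes resume. */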
1517
1518 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1519 unsigned long start, unsigned long length)
1520 {
1521 unsigned long addr;
1522 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1523 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1524 if ((addr - start) < length) {
1525 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1526 }
1527 }
1528 }
1529
1530 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1531 int dirty_flags)
1532 {
1533 CPUState *env;
1534 unsigned long length, start1;
1535 int i, mask, len;
1536 uint8_t *p;
1537
1538 start &= TARGET_PAGE_MASK;
1539 end = TARGET_PAGE_ALIGN(end);
1540
1541 length = end - start;
1542 if (length == 0)
1543 return;
1544 mask = ~dirty_flags;
1545 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1546 len = length >> TARGET_PAGE_BITS;
1547 for(i = 0; i < len; i++)
1548 p[i] &= mask;
1549
1550 env = cpu_single_env;
1551 #ifdef USE_KQEMU
1552 if (env->kqemu_enabled) {
1553 for(i = 0; i < len; i++)
1554             kqemu_set_notdirty(env, start + ((unsigned long)i << TARGET_PAGE_BITS));
1555 }
1556 #endif
1557 /* we modify the TLB cache so that the dirty bit will be set again
1558 when accessing the range */
1559 start1 = start + (unsigned long)phys_ram_base;
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1562 for(i = 0; i < CPU_TLB_SIZE; i++)
1563 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1564
1565 #if !defined(CONFIG_SOFTMMU)
1566 /* XXX: this is expensive */
1567 {
1568 VirtPageDesc *p;
1569 int j;
1570 target_ulong addr;
1571
1572 for(i = 0; i < L1_SIZE; i++) {
1573 p = l1_virt_map[i];
1574 if (p) {
1575 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1576 for(j = 0; j < L2_SIZE; j++) {
1577 if (p->valid_tag == virt_valid_tag &&
1578 p->phys_addr >= start && p->phys_addr < end &&
1579 (p->prot & PROT_WRITE)) {
1580 if (addr < MMAP_AREA_END) {
1581 mprotect((void *)addr, TARGET_PAGE_SIZE,
1582 p->prot & ~PROT_WRITE);
1583 }
1584 }
1585 addr += TARGET_PAGE_SIZE;
1586 p++;
1587 }
1588 }
1589 }
1590 }
1591 #endif
1592 }
1593
1594 static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1595 {
1596 ram_addr_t ram_addr;
1597
1598 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1599 ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
1600 tlb_entry->addend - (unsigned long)phys_ram_base;
1601 if (!cpu_physical_memory_is_dirty(ram_addr)) {
1602 tlb_entry->address |= IO_MEM_NOTDIRTY;
1603 }
1604 }
1605 }
1606
1607 /* update the TLB according to the current state of the dirty bits */
1608 void cpu_tlb_update_dirty(CPUState *env)
1609 {
1610 int i;
1611 for(i = 0; i < CPU_TLB_SIZE; i++)
1612 tlb_update_dirty(&env->tlb_write[0][i]);
1613 for(i = 0; i < CPU_TLB_SIZE; i++)
1614 tlb_update_dirty(&env->tlb_write[1][i]);
1615 }
1616
1617 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1618 unsigned long start)
1619 {
1620 unsigned long addr;
1621 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1622 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1623 if (addr == start) {
1624 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1625 }
1626 }
1627 }
1628
1629 /* update the TLB corresponding to virtual page vaddr and phys addr
1630 addr so that it is no longer dirty */
1631 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1632 {
1633 CPUState *env = cpu_single_env;
1634 int i;
1635
1636 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;
1637
1638 addr &= TARGET_PAGE_MASK;
1639 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1640 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1641 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1642 }
1643
1644 /* add a new TLB entry. At most one entry for a given virtual address
1645 is permitted. Return 0 if OK or 2 if the page could not be mapped
1646 (can only happen in non SOFTMMU mode for I/O pages or pages
1647 conflicting with the host address space). */
1648 int tlb_set_page(CPUState *env, target_ulong vaddr,
1649 target_phys_addr_t paddr, int prot,
1650 int is_user, int is_softmmu)
1651 {
1652 PhysPageDesc *p;
1653 unsigned long pd;
1654 unsigned int index;
1655 target_ulong address;
1656 target_phys_addr_t addend;
1657 int ret;
1658
1659 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1660 if (!p) {
1661 pd = IO_MEM_UNASSIGNED;
1662 } else {
1663 pd = p->phys_offset;
1664 }
1665 #if defined(DEBUG_TLB)
1666 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1667 vaddr, paddr, prot, is_user, is_softmmu, pd);
1668 #endif
1669
1670 ret = 0;
1671 #if !defined(CONFIG_SOFTMMU)
1672 if (is_softmmu)
1673 #endif
1674 {
1675 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1676 /* IO memory case */
1677 address = vaddr | pd;
1678 addend = paddr;
1679 } else {
1680 /* standard memory */
1681 address = vaddr;
1682 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1683 }
1684
1685 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1686 addend -= vaddr;
1687 if (prot & PAGE_READ) {
1688 env->tlb_read[is_user][index].address = address;
1689 env->tlb_read[is_user][index].addend = addend;
1690 } else {
1691 env->tlb_read[is_user][index].address = -1;
1692 env->tlb_read[is_user][index].addend = -1;
1693 }
1694 if (prot & PAGE_WRITE) {
1695 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1696 /* ROM: access is ignored (same as unassigned) */
1697 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1698 env->tlb_write[is_user][index].addend = addend;
1699 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1700 !cpu_physical_memory_is_dirty(pd)) {
1701 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1702 env->tlb_write[is_user][index].addend = addend;
1703 } else {
1704 env->tlb_write[is_user][index].address = address;
1705 env->tlb_write[is_user][index].addend = addend;
1706 }
1707 } else {
1708 env->tlb_write[is_user][index].address = -1;
1709 env->tlb_write[is_user][index].addend = -1;
1710 }
1711 }
1712 #if !defined(CONFIG_SOFTMMU)
1713 else {
1714 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1715 /* IO access: no mapping is done as it will be handled by the
1716 soft MMU */
1717 if (!(env->hflags & HF_SOFTMMU_MASK))
1718 ret = 2;
1719 } else {
1720 void *map_addr;
1721
1722 if (vaddr >= MMAP_AREA_END) {
1723 ret = 2;
1724 } else {
1725 if (prot & PROT_WRITE) {
1726 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1727 #if defined(TARGET_HAS_SMC) || 1
1728 first_tb ||
1729 #endif
1730 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1731 !cpu_physical_memory_is_dirty(pd))) {
1732                     /* ROM: we behave as if code were present inside */
1733 /* if code is present, we only map as read only and save the
1734 original mapping */
1735 VirtPageDesc *vp;
1736
1737 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1738 vp->phys_addr = pd;
1739 vp->prot = prot;
1740 vp->valid_tag = virt_valid_tag;
1741 prot &= ~PAGE_WRITE;
1742 }
1743 }
1744 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1745 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1746 if (map_addr == MAP_FAILED) {
1747                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1748 paddr, vaddr);
1749 }
1750 }
1751 }
1752 }
1753 #endif
1754 return ret;
1755 }
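/* TLB entry format used above: 'address' holds the virtual page address OR'ed
   with an io index in its low bits (IO_MEM_RAM, IO_MEM_ROM, IO_MEM_NOTDIRTY or
   a device index), and 'addend' is chosen so that vaddr + addend gives the
   host pointer for RAM pages (or the physical address for i/o pages). */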
1756
1757 /* called from signal handler: invalidate the code and unprotect the
1758    page. Return TRUE if the fault was successfully handled. */
1759 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1760 {
1761 #if !defined(CONFIG_SOFTMMU)
1762 VirtPageDesc *vp;
1763
1764 #if defined(DEBUG_TLB)
1765 printf("page_unprotect: addr=0x%08x\n", addr);
1766 #endif
1767 addr &= TARGET_PAGE_MASK;
1768
1769 /* if it is not mapped, no need to worry here */
1770 if (addr >= MMAP_AREA_END)
1771 return 0;
1772 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1773 if (!vp)
1774 return 0;
1775 /* NOTE: in this case, validate_tag is _not_ tested as it
1776 validates only the code TLB */
1777 if (vp->valid_tag != virt_valid_tag)
1778 return 0;
1779 if (!(vp->prot & PAGE_WRITE))
1780 return 0;
1781 #if defined(DEBUG_TLB)
1782 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1783 addr, vp->phys_addr, vp->prot);
1784 #endif
1785 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1786 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1787 (unsigned long)addr, vp->prot);
1788 /* set the dirty bit */
1789 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1790 /* flush the code inside */
1791 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1792 return 1;
1793 #else
1794 return 0;
1795 #endif
1796 }
1797
1798 #else
1799
1800 void tlb_flush(CPUState *env, int flush_global)
1801 {
1802 }
1803
1804 void tlb_flush_page(CPUState *env, target_ulong addr)
1805 {
1806 }
1807
1808 int tlb_set_page(CPUState *env, target_ulong vaddr,
1809 target_phys_addr_t paddr, int prot,
1810 int is_user, int is_softmmu)
1811 {
1812 return 0;
1813 }
1814
1815 /* dump memory mappings */
1816 void page_dump(FILE *f)
1817 {
1818 unsigned long start, end;
1819 int i, j, prot, prot1;
1820 PageDesc *p;
1821
1822 fprintf(f, "%-8s %-8s %-8s %s\n",
1823 "start", "end", "size", "prot");
1824 start = -1;
1825 end = -1;
1826 prot = 0;
1827 for(i = 0; i <= L1_SIZE; i++) {
1828 if (i < L1_SIZE)
1829 p = l1_map[i];
1830 else
1831 p = NULL;
1832 for(j = 0;j < L2_SIZE; j++) {
1833 if (!p)
1834 prot1 = 0;
1835 else
1836 prot1 = p[j].flags;
1837 if (prot1 != prot) {
1838 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1839 if (start != -1) {
1840 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1841 start, end, end - start,
1842 prot & PAGE_READ ? 'r' : '-',
1843 prot & PAGE_WRITE ? 'w' : '-',
1844 prot & PAGE_EXEC ? 'x' : '-');
1845 }
1846 if (prot1 != 0)
1847 start = end;
1848 else
1849 start = -1;
1850 prot = prot1;
1851 }
1852 if (!p)
1853 break;
1854 }
1855 }
1856 }
1857
1858 int page_get_flags(unsigned long address)
1859 {
1860 PageDesc *p;
1861
1862 p = page_find(address >> TARGET_PAGE_BITS);
1863 if (!p)
1864 return 0;
1865 return p->flags;
1866 }
1867
1868 /* modify the flags of a page and invalidate the code if
1869    necessary. The flag PAGE_WRITE_ORG is set automatically
1870 depending on PAGE_WRITE */
1871 void page_set_flags(unsigned long start, unsigned long end, int flags)
1872 {
1873 PageDesc *p;
1874 unsigned long addr;
1875
1876 start = start & TARGET_PAGE_MASK;
1877 end = TARGET_PAGE_ALIGN(end);
1878 if (flags & PAGE_WRITE)
1879 flags |= PAGE_WRITE_ORG;
1880 spin_lock(&tb_lock);
1881 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1882 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1883         /* if the page was write protected and becomes writable, we
1884            invalidate the code inside */
1885 if (!(p->flags & PAGE_WRITE) &&
1886 (flags & PAGE_WRITE) &&
1887 p->first_tb) {
1888 tb_invalidate_phys_page(addr, 0, NULL);
1889 }
1890 p->flags = flags;
1891 }
1892 spin_unlock(&tb_lock);
1893 }
1894
1895 /* called from signal handler: invalidate the code and unprotect the
1896    page. Return TRUE if the fault was successfully handled. */
1897 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1898 {
1899 unsigned int page_index, prot, pindex;
1900 PageDesc *p, *p1;
1901 unsigned long host_start, host_end, addr;
1902
1903 host_start = address & qemu_host_page_mask;
1904 page_index = host_start >> TARGET_PAGE_BITS;
1905 p1 = page_find(page_index);
1906 if (!p1)
1907 return 0;
1908 host_end = host_start + qemu_host_page_size;
1909 p = p1;
1910 prot = 0;
1911 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1912 prot |= p->flags;
1913 p++;
1914 }
1915 /* if the page was really writable, then we change its
1916 protection back to writable */
1917 if (prot & PAGE_WRITE_ORG) {
1918 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1919 if (!(p1[pindex].flags & PAGE_WRITE)) {
1920 mprotect((void *)host_start, qemu_host_page_size,
1921 (prot & PAGE_BITS) | PAGE_WRITE);
1922 p1[pindex].flags |= PAGE_WRITE;
1923 /* and since the content will be modified, we must invalidate
1924 the corresponding translated code. */
1925 tb_invalidate_phys_page(address, pc, puc);
1926 #ifdef DEBUG_TB_CHECK
1927 tb_invalidate_check(address);
1928 #endif
1929 return 1;
1930 }
1931 }
1932 return 0;
1933 }
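/* Summary of the user mode SMC scheme: pages containing translated code lose
   PAGE_WRITE (see tb_alloc_page) but keep PAGE_WRITE_ORG. A guest write then
   faults, the signal handler calls page_unprotect(), which restores the write
   permission, invalidates the TBs of the page and returns 1 so that the write
   can be restarted. */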
1934
1935 /* call this function when system calls directly modify a memory area */
1936 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1937 {
1938 unsigned long start, end, addr;
1939
1940 start = (unsigned long)data;
1941 end = start + data_size;
1942 start &= TARGET_PAGE_MASK;
1943 end = TARGET_PAGE_ALIGN(end);
1944 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1945 page_unprotect(addr, 0, NULL);
1946 }
1947 }
1948
1949 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1950 {
1951 }
1952 #endif /* defined(CONFIG_USER_ONLY) */
1953
1954 /* register physical memory. 'size' must be a multiple of the target
1955 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1956 io memory page */
1957 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1958 unsigned long size,
1959 unsigned long phys_offset)
1960 {
1961 target_phys_addr_t addr, end_addr;
1962 PhysPageDesc *p;
1963
1964 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1965 end_addr = start_addr + size;
1966 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1967 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1968 p->phys_offset = phys_offset;
1969 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1970 phys_offset += TARGET_PAGE_SIZE;
1971 }
1972 }
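/* Typical use (a sketch, the actual callers live outside this file): RAM is
   registered with its ram offset, e.g. cpu_register_physical_memory(0,
   ram_size, 0), while a device passes the value returned by
   cpu_register_io_memory() as phys_offset so that
   (phys_offset & ~TARGET_PAGE_MASK) selects its handlers. */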
1973
1974 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1975 {
1976 return 0;
1977 }
1978
1979 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1980 {
1981 }
1982
1983 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1984 unassigned_mem_readb,
1985 unassigned_mem_readb,
1986 unassigned_mem_readb,
1987 };
1988
1989 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1990 unassigned_mem_writeb,
1991 unassigned_mem_writeb,
1992 unassigned_mem_writeb,
1993 };
1994
1995 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1996 {
1997 unsigned long ram_addr;
1998 int dirty_flags;
1999 ram_addr = addr - (unsigned long)phys_ram_base;
2000 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2001 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2002 #if !defined(CONFIG_USER_ONLY)
2003 tb_invalidate_phys_page_fast(ram_addr, 1);
2004 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2005 #endif
2006 }
2007 stb_p((uint8_t *)(long)addr, val);
2008 /* we set the page as dirty only if the code has been flushed */
2009 if (dirty_flags & CODE_DIRTY_FLAG)
2010 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2011 }
2012
2013 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2014 {
2015 unsigned long ram_addr;
2016 int dirty_flags;
2017 ram_addr = addr - (unsigned long)phys_ram_base;
2018 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2019 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2020 #if !defined(CONFIG_USER_ONLY)
2021 tb_invalidate_phys_page_fast(ram_addr, 2);
2022 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2023 #endif
2024 }
2025 stw_p((uint8_t *)(long)addr, val);
2026 /* we set the page as dirty only if the code has been flushed */
2027 if (dirty_flags & CODE_DIRTY_FLAG)
2028 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2029 }
2030
2031 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2032 {
2033 unsigned long ram_addr;
2034 int dirty_flags;
2035 ram_addr = addr - (unsigned long)phys_ram_base;
2036 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2037 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2038 #if !defined(CONFIG_USER_ONLY)
2039 tb_invalidate_phys_page_fast(ram_addr, 4);
2040 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2041 #endif
2042 }
2043 stl_p((uint8_t *)(long)addr, val);
2044 /* we set the page as dirty only if the code has been flushed */
2045 if (dirty_flags & CODE_DIRTY_FLAG)
2046 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2047 }
2048
2049 static CPUReadMemoryFunc *error_mem_read[3] = {
2050 NULL, /* never used */
2051 NULL, /* never used */
2052 NULL, /* never used */
2053 };
2054
2055 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2056 notdirty_mem_writeb,
2057 notdirty_mem_writew,
2058 notdirty_mem_writel,
2059 };
2060
2061 static void io_mem_init(void)
2062 {
2063 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2064 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2065 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2066 io_mem_nb = 5;
2067
2068 /* alloc dirty bits array */
2069 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2070 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2071 }
2072
2073 /* mem_read and mem_write are arrays of functions containing the
2074 function to access byte (index 0), word (index 1) and dword (index
2075 2). All functions must be supplied. If io_index is positive, the
2076 corresponding io zone is modified. If it is zero or negative, a new
2077 io zone is allocated. The return value can be used with
2078 cpu_register_physical_memory(). -1 is returned on error. */
2079 int cpu_register_io_memory(int io_index,
2080 CPUReadMemoryFunc **mem_read,
2081 CPUWriteMemoryFunc **mem_write,
2082 void *opaque)
2083 {
2084 int i;
2085
2086 if (io_index <= 0) {
2087 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2088 return -1;
2089 io_index = io_mem_nb++;
2090 } else {
2091 if (io_index >= IO_MEM_NB_ENTRIES)
2092 return -1;
2093 }
2094
2095 for(i = 0; i < 3; i++) {
2096 io_mem_read[io_index][i] = mem_read[i];
2097 io_mem_write[io_index][i] = mem_write[i];
2098 }
2099 io_mem_opaque[io_index] = opaque;
2100 return io_index << IO_MEM_SHIFT;
2101 }
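
/* Usage sketch (assumption; device models elsewhere in the tree follow this
   pattern, all my_dev_* names, opaque and base_addr are hypothetical):

       static CPUReadMemoryFunc *my_dev_read[3] = {
           my_dev_readb, my_dev_readw, my_dev_readl,
       };
       static CPUWriteMemoryFunc *my_dev_write[3] = {
           my_dev_writeb, my_dev_writew, my_dev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, my_dev_read,
                                              my_dev_write, opaque);
       cpu_register_physical_memory(base_addr, 0x1000, iomemtype);
*/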
2102
2103 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2104 {
2105 return io_mem_write[io_index >> IO_MEM_SHIFT];
2106 }
2107
2108 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2109 {
2110 return io_mem_read[io_index >> IO_MEM_SHIFT];
2111 }
2112
2113 /* physical memory access (slow version, mainly for debug) */
2114 #if defined(CONFIG_USER_ONLY)
2115 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2116 int len, int is_write)
2117 {
2118 int l, flags;
2119 target_ulong page;
2120
2121 while (len > 0) {
2122 page = addr & TARGET_PAGE_MASK;
2123 l = (page + TARGET_PAGE_SIZE) - addr;
2124 if (l > len)
2125 l = len;
2126 flags = page_get_flags(page);
2127 if (!(flags & PAGE_VALID))
2128 return;
2129 if (is_write) {
2130 if (!(flags & PAGE_WRITE))
2131 return;
2132 memcpy((uint8_t *)addr, buf, l);
2133 } else {
2134 if (!(flags & PAGE_READ))
2135 return;
2136 memcpy(buf, (uint8_t *)addr, l);
2137 }
2138 len -= l;
2139 buf += l;
2140 addr += l;
2141 }
2142 }
2143
2144 /* never used */
2145 uint32_t ldl_phys(target_phys_addr_t addr)
2146 {
2147 return 0;
2148 }
2149
2150 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2151 {
2152 }
2153
2154 void stl_phys(target_phys_addr_t addr, uint32_t val)
2155 {
2156 }
2157
2158 #else
2159 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2160 int len, int is_write)
2161 {
2162 int l, io_index;
2163 uint8_t *ptr;
2164 uint32_t val;
2165 target_phys_addr_t page;
2166 unsigned long pd;
2167 PhysPageDesc *p;
2168
2169 while (len > 0) {
2170 page = addr & TARGET_PAGE_MASK;
2171 l = (page + TARGET_PAGE_SIZE) - addr;
2172 if (l > len)
2173 l = len;
2174 p = phys_page_find(page >> TARGET_PAGE_BITS);
2175 if (!p) {
2176 pd = IO_MEM_UNASSIGNED;
2177 } else {
2178 pd = p->phys_offset;
2179 }
2180
2181 if (is_write) {
2182 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2183 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2184 if (l >= 4 && ((addr & 3) == 0)) {
2185 /* 32 bit write access */
2186 val = ldl_p(buf);
2187 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2188 l = 4;
2189 } else if (l >= 2 && ((addr & 1) == 0)) {
2190 /* 16 bit write access */
2191 val = lduw_p(buf);
2192 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2193 l = 2;
2194 } else {
2195 /* 8 bit access */
2196 val = ldub_p(buf);
2197 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2198 l = 1;
2199 }
2200 } else {
2201 unsigned long addr1;
2202 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2203 /* RAM case */
2204 ptr = phys_ram_base + addr1;
2205 memcpy(ptr, buf, l);
2206 if (!cpu_physical_memory_is_dirty(addr1)) {
2207 /* invalidate code */
2208 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2209 /* set dirty bit */
2210 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
2211 }
2212 }
2213 } else {
2214 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2215 /* I/O case */
2216 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2217 if (l >= 4 && ((addr & 3) == 0)) {
2218 /* 32 bit read access */
2219 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2220 stl_p(buf, val);
2221 l = 4;
2222 } else if (l >= 2 && ((addr & 1) == 0)) {
2223 /* 16 bit read access */
2224 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2225 stw_p(buf, val);
2226 l = 2;
2227 } else {
2228 /* 8 bit access */
2229 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2230 stb_p(buf, val);
2231 l = 1;
2232 }
2233 } else {
2234 /* RAM case */
2235 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2236 (addr & ~TARGET_PAGE_MASK);
2237 memcpy(buf, ptr, l);
2238 }
2239 }
2240 len -= l;
2241 buf += l;
2242 addr += l;
2243 }
2244 }
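
/* Usage sketch (assumption, not code from this file): a DMA-capable device
   model can use cpu_physical_memory_rw() to copy between a host buffer and
   guest physical memory without caring whether it hits RAM or MMIO; the
   last argument selects the direction (0 = read from guest memory, 1 =
   write to it).  desc_phys is a hypothetical guest physical address:

       uint8_t desc[16];
       cpu_physical_memory_rw(desc_phys, desc, sizeof(desc), 0);
       desc[0] |= 1;
       cpu_physical_memory_rw(desc_phys, desc, sizeof(desc), 1);
*/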
2245
2246 /* warning: addr must be aligned */
2247 uint32_t ldl_phys(target_phys_addr_t addr)
2248 {
2249 int io_index;
2250 uint8_t *ptr;
2251 uint32_t val;
2252 unsigned long pd;
2253 PhysPageDesc *p;
2254
2255 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2256 if (!p) {
2257 pd = IO_MEM_UNASSIGNED;
2258 } else {
2259 pd = p->phys_offset;
2260 }
2261
2262 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
2263 /* I/O case */
2264 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2265 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2266 } else {
2267 /* RAM case */
2268 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2269 (addr & ~TARGET_PAGE_MASK);
2270 val = ldl_p(ptr);
2271 }
2272 return val;
2273 }
2274
2275 /* warning: addr must be aligned. The ram page is not marked as dirty
2276 and the code inside is not invalidated. It is useful if the dirty
2277 bits are used to track modified PTEs */
2278 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2279 {
2280 int io_index;
2281 uint8_t *ptr;
2282 unsigned long pd;
2283 PhysPageDesc *p;
2284
2285 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2286 if (!p) {
2287 pd = IO_MEM_UNASSIGNED;
2288 } else {
2289 pd = p->phys_offset;
2290 }
2291
2292 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2293 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2294 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2295 } else {
2296 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2297 (addr & ~TARGET_PAGE_MASK);
2298 stl_p(ptr, val);
2299 }
2300 }
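
/* Usage sketch (assumption): target MMU code that updates accessed/dirty
   bits in a guest page table entry can use stl_phys_notdirty() so that its
   own store does not mark the page dirty, keeping the dirty bits usable
   for detecting guest modifications of page-table pages.  pte_addr and
   PTE_ACCESSED are hypothetical names:

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED)) {
           stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
       }
*/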
2301
2302 /* warning: addr must be aligned */
2303 void stl_phys(target_phys_addr_t addr, uint32_t val)
2304 {
2305 int io_index;
2306 uint8_t *ptr;
2307 unsigned long pd;
2308 PhysPageDesc *p;
2309
2310 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2311 if (!p) {
2312 pd = IO_MEM_UNASSIGNED;
2313 } else {
2314 pd = p->phys_offset;
2315 }
2316
2317 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2318 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2319 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2320 } else {
2321 unsigned long addr1;
2322 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2323 /* RAM case */
2324 ptr = phys_ram_base + addr1;
2325 stl_p(ptr, val);
2326 if (!cpu_physical_memory_is_dirty(addr1)) {
2327 /* invalidate code */
2328 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2329 /* set dirty bit */
2330 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
2331 }
2332 }
2333 }
2334
2335 #endif
2336
2337 /* virtual memory access for debug */
2338 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2339 uint8_t *buf, int len, int is_write)
2340 {
2341 int l;
2342 target_ulong page, phys_addr;
2343
2344 while (len > 0) {
2345 page = addr & TARGET_PAGE_MASK;
2346 phys_addr = cpu_get_phys_page_debug(env, page);
2347 /* if no physical page mapped, return an error */
2348 if (phys_addr == -1)
2349 return -1;
2350 l = (page + TARGET_PAGE_SIZE) - addr;
2351 if (l > len)
2352 l = len;
2353 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2354 buf, l, is_write);
2355 len -= l;
2356 buf += l;
2357 addr += l;
2358 }
2359 return 0;
2360 }
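
/* Usage sketch (assumption): debugger-style accesses, e.g. from a gdb stub
   or a monitor memory dump, go through cpu_memory_rw_debug() because it
   translates guest virtual addresses with cpu_get_phys_page_debug()
   instead of relying on the TLB.  guest_vaddr is hypothetical:

       uint8_t buf[64];
       if (cpu_memory_rw_debug(env, guest_vaddr, buf, sizeof(buf), 0) < 0) {
           return -1;
       }
*/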
2361
2362 void dump_exec_info(FILE *f,
2363 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2364 {
2365 int i, target_code_size, max_target_code_size;
2366 int direct_jmp_count, direct_jmp2_count, cross_page;
2367 TranslationBlock *tb;
2368
2369 target_code_size = 0;
2370 max_target_code_size = 0;
2371 cross_page = 0;
2372 direct_jmp_count = 0;
2373 direct_jmp2_count = 0;
2374 for(i = 0; i < nb_tbs; i++) {
2375 tb = &tbs[i];
2376 target_code_size += tb->size;
2377 if (tb->size > max_target_code_size)
2378 max_target_code_size = tb->size;
2379 if (tb->page_addr[1] != -1)
2380 cross_page++;
2381 if (tb->tb_next_offset[0] != 0xffff) {
2382 direct_jmp_count++;
2383 if (tb->tb_next_offset[1] != 0xffff) {
2384 direct_jmp2_count++;
2385 }
2386 }
2387 }
2388 /* XXX: avoid using doubles ? */
2389 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2390 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2391 nb_tbs ? target_code_size / nb_tbs : 0,
2392 max_target_code_size);
2393 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2394 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2395 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2396 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2397 cross_page,
2398 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2399 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2400 direct_jmp_count,
2401 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2402 direct_jmp2_count,
2403 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2404 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2405 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2406 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2407 }
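
/* Usage sketch (assumption): the statistics above can be printed with any
   fprintf-compatible callback, e.g. from a monitor or debug command:

       dump_exec_info(stdout, fprintf);
*/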
2408
2409 #if !defined(CONFIG_USER_ONLY)
2410
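/* The four inclusions of softmmu_template.h below instantiate the
   code-access memory helpers for 1, 2, 4 and 8 byte loads (access size is
   1 << SHIFT).  MMUSUFFIX _cmmu and SOFTMMU_CODE_ACCESS select the
   variants used when fetching target instructions, and GETPC() is NULL
   because these helpers are not called from generated host code, so there
   is no return address to recover CPU state from. */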
2411 #define MMUSUFFIX _cmmu
2412 #define GETPC() NULL
2413 #define env cpu_single_env
2414 #define SOFTMMU_CODE_ACCESS
2415
2416 #define SHIFT 0
2417 #include "softmmu_template.h"
2418
2419 #define SHIFT 1
2420 #include "softmmu_template.h"
2421
2422 #define SHIFT 2
2423 #include "softmmu_template.h"
2424
2425 #define SHIFT 3
2426 #include "softmmu_template.h"
2427
2428 #undef env
2429
2430 #endif