1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #ifdef _WIN32
22 #include <windows.h>
23 #else
24 #include <sys/types.h>
25 #include <sys/mman.h>
26 #endif
27 #include <stdlib.h>
28 #include <stdio.h>
29 #include <stdarg.h>
30 #include <string.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <inttypes.h>
34
35 #include "cpu.h"
36 #include "exec-all.h"
37
38 //#define DEBUG_TB_INVALIDATE
39 //#define DEBUG_FLUSH
40 //#define DEBUG_TLB
41
42 /* make various TB consistency checks */
43 //#define DEBUG_TB_CHECK
44 //#define DEBUG_TLB_CHECK
45
46 /* threshold to flush the translated code buffer */
47 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
48
49 #define SMC_BITMAP_USE_THRESHOLD 10
50
51 #define MMAP_AREA_START 0x00000000
52 #define MMAP_AREA_END 0xa8000000
53
54 #if defined(TARGET_SPARC64)
55 #define TARGET_PHYS_ADDR_SPACE_BITS 41
56 #elif defined(TARGET_PPC64)
57 #define TARGET_PHYS_ADDR_SPACE_BITS 42
58 #else
59 /* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
60 #define TARGET_PHYS_ADDR_SPACE_BITS 32
61 #endif
62
63 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
64 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
65 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
66 int nb_tbs;
67 /* any access to the tbs or the page table must use this lock */
68 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
69
70 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
71 uint8_t *code_gen_ptr;
72
73 int phys_ram_size;
74 int phys_ram_fd;
75 uint8_t *phys_ram_base;
76 uint8_t *phys_ram_dirty;
77
78 typedef struct PageDesc {
79 /* list of TBs intersecting this ram page */
80 TranslationBlock *first_tb;
81 /* in order to optimize self modifying code handling, we count the write
82 accesses to a given code page; above a threshold a code bitmap is used */
83 unsigned int code_write_count;
84 uint8_t *code_bitmap;
85 #if defined(CONFIG_USER_ONLY)
86 unsigned long flags;
87 #endif
88 } PageDesc;
89
90 typedef struct PhysPageDesc {
91 /* offset in host memory of the page + io_index in the low 12 bits */
92 uint32_t phys_offset;
93 } PhysPageDesc;
94
95 /* Note: the VirtPage handling is obsolete and will be removed
96 ASAP */
97 typedef struct VirtPageDesc {
98 /* physical address of code page. It is valid only if 'valid_tag'
99 matches 'virt_valid_tag' */
100 target_ulong phys_addr;
101 unsigned int valid_tag;
102 #if !defined(CONFIG_SOFTMMU)
103 /* original page access rights. It is valid only if 'valid_tag'
104 matches 'virt_valid_tag' */
105 unsigned int prot;
106 #endif
107 } VirtPageDesc;
108
109 #define L2_BITS 10
110 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
111
112 #define L1_SIZE (1 << L1_BITS)
113 #define L2_SIZE (1 << L2_BITS)
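/* Worked example of the two-level split, assuming a 4 KB target page
   (TARGET_PAGE_BITS == 12): L1_BITS = 32 - 10 - 12 = 10, so a 32-bit
   address divides into a 10-bit l1_map index, a 10-bit index into the
   L2 PageDesc array, and a 12-bit page offset. l1_map then holds 1024
   pointers, each to a block of 1024 PageDescs. */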
114
115 static void io_mem_init(void);
116
117 unsigned long qemu_real_host_page_size;
118 unsigned long qemu_host_page_bits;
119 unsigned long qemu_host_page_size;
120 unsigned long qemu_host_page_mask;
121
122 /* XXX: for system emulation, it could just be an array */
123 static PageDesc *l1_map[L1_SIZE];
124 PhysPageDesc **l1_phys_map;
125
126 #if !defined(CONFIG_USER_ONLY)
127 #if TARGET_LONG_BITS > 32
128 #define VIRT_L_BITS 9
129 #define VIRT_L_SIZE (1 << VIRT_L_BITS)
130 static void *l1_virt_map[VIRT_L_SIZE];
131 #else
132 static VirtPageDesc *l1_virt_map[L1_SIZE];
133 #endif
134 static unsigned int virt_valid_tag;
135 #endif
136
137 /* io memory support */
138 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
139 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
140 void *io_mem_opaque[IO_MEM_NB_ENTRIES];
141 static int io_mem_nb;
142
143 /* log support */
144 char *logfilename = "/tmp/qemu.log";
145 FILE *logfile;
146 int loglevel;
147
148 /* statistics */
149 static int tlb_flush_count;
150 static int tb_flush_count;
151 static int tb_phys_invalidate_count;
152
153 static void page_init(void)
154 {
155 /* NOTE: we can always assume that qemu_host_page_size >=
156 TARGET_PAGE_SIZE */
157 #ifdef _WIN32
158 {
159 SYSTEM_INFO system_info;
160 DWORD old_protect;
161
162 GetSystemInfo(&system_info);
163 qemu_real_host_page_size = system_info.dwPageSize;
164
165 VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
166 PAGE_EXECUTE_READWRITE, &old_protect);
167 }
168 #else
169 qemu_real_host_page_size = getpagesize();
170 {
171 unsigned long start, end;
172
173 start = (unsigned long)code_gen_buffer;
174 start &= ~(qemu_real_host_page_size - 1);
175
176 end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
177 end += qemu_real_host_page_size - 1;
178 end &= ~(qemu_real_host_page_size - 1);
179
180 mprotect((void *)start, end - start,
181 PROT_READ | PROT_WRITE | PROT_EXEC);
182 }
183 #endif
184
185 if (qemu_host_page_size == 0)
186 qemu_host_page_size = qemu_real_host_page_size;
187 if (qemu_host_page_size < TARGET_PAGE_SIZE)
188 qemu_host_page_size = TARGET_PAGE_SIZE;
189 qemu_host_page_bits = 0;
190 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
191 qemu_host_page_bits++;
192 qemu_host_page_mask = ~(qemu_host_page_size - 1);
193 #if !defined(CONFIG_USER_ONLY)
194 virt_valid_tag = 1;
195 #endif
196 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
197 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
198 }
199
200 static inline PageDesc *page_find_alloc(unsigned int index)
201 {
202 PageDesc **lp, *p;
203
204 lp = &l1_map[index >> L2_BITS];
205 p = *lp;
206 if (!p) {
207 /* allocate if not found */
208 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
209 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
210 *lp = p;
211 }
212 return p + (index & (L2_SIZE - 1));
213 }
214
215 static inline PageDesc *page_find(unsigned int index)
216 {
217 PageDesc *p;
218
219 p = l1_map[index >> L2_BITS];
220 if (!p)
221 return 0;
222 return p + (index & (L2_SIZE - 1));
223 }
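/* Both lookup helpers are keyed by target page index rather than by
   address, i.e. callers pass addr >> TARGET_PAGE_BITS (see for example
   tb_invalidate_phys_page_range() below); page_find() returns 0 for a
   page that was never allocated, so every caller must check the result. */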
224
225 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
226 {
227 void **lp, **p;
228
229 p = (void **)l1_phys_map;
230 #if TARGET_PHYS_ADDR_SPACE_BITS > 32
231
232 #if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
233 #error unsupported TARGET_PHYS_ADDR_SPACE_BITS
234 #endif
235 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
236 p = *lp;
237 if (!p) {
238 /* allocate if not found */
239 if (!alloc)
240 return NULL;
241 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
242 memset(p, 0, sizeof(void *) * L1_SIZE);
243 *lp = p;
244 }
245 #endif
246 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
247 p = *lp;
248 if (!p) {
249 /* allocate if not found */
250 if (!alloc)
251 return NULL;
252 p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
253 memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
254 *lp = p;
255 }
256 return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
257 }
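/* With TARGET_PHYS_ADDR_SPACE_BITS > 32 (e.g. 42 on PPC64, per the
   defines above) the physical page index no longer fits in
   L1_BITS + L2_BITS bits, so an extra top-level table is used: the bits
   above (L1_BITS + L2_BITS) select an entry of l1_phys_map pointing to a
   second directory, which in turn points to the PhysPageDesc block. For
   the 32-bit case that first indexing step is compiled out. */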
258
259 static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
260 {
261 return phys_page_find_alloc(index, 0);
262 }
263
264 #if !defined(CONFIG_USER_ONLY)
265 static void tlb_protect_code(CPUState *env, target_ulong addr);
266 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
267
268 static VirtPageDesc *virt_page_find_alloc(target_ulong index, int alloc)
269 {
270 #if TARGET_LONG_BITS > 32
271 void **p, **lp;
272
273 p = l1_virt_map;
274 lp = p + ((index >> (5 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
275 p = *lp;
276 if (!p) {
277 if (!alloc)
278 return NULL;
279 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
280 *lp = p;
281 }
282 lp = p + ((index >> (4 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
283 p = *lp;
284 if (!p) {
285 if (!alloc)
286 return NULL;
287 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
288 *lp = p;
289 }
290 lp = p + ((index >> (3 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
291 p = *lp;
292 if (!p) {
293 if (!alloc)
294 return NULL;
295 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
296 *lp = p;
297 }
298 lp = p + ((index >> (2 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
299 p = *lp;
300 if (!p) {
301 if (!alloc)
302 return NULL;
303 p = qemu_mallocz(sizeof(void *) * VIRT_L_SIZE);
304 *lp = p;
305 }
306 lp = p + ((index >> (1 * VIRT_L_BITS)) & (VIRT_L_SIZE - 1));
307 p = *lp;
308 if (!p) {
309 if (!alloc)
310 return NULL;
311 p = qemu_mallocz(sizeof(VirtPageDesc) * VIRT_L_SIZE);
312 *lp = p;
313 }
314 return ((VirtPageDesc *)p) + (index & (VIRT_L_SIZE - 1));
315 #else
316 VirtPageDesc *p, **lp;
317
318 lp = &l1_virt_map[index >> L2_BITS];
319 p = *lp;
320 if (!p) {
321 /* allocate if not found */
322 if (!alloc)
323 return NULL;
324 p = qemu_mallocz(sizeof(VirtPageDesc) * L2_SIZE);
325 *lp = p;
326 }
327 return p + (index & (L2_SIZE - 1));
328 #endif
329 }
330
331 static inline VirtPageDesc *virt_page_find(target_ulong index)
332 {
333 return virt_page_find_alloc(index, 0);
334 }
335
336 #if TARGET_LONG_BITS > 32
337 static void virt_page_flush_internal(void **p, int level)
338 {
339 int i;
340 if (level == 0) {
341 VirtPageDesc *q = (VirtPageDesc *)p;
342 for(i = 0; i < VIRT_L_SIZE; i++)
343 q[i].valid_tag = 0;
344 } else {
345 level--;
346 for(i = 0; i < VIRT_L_SIZE; i++) {
347 if (p[i])
348 virt_page_flush_internal(p[i], level);
349 }
350 }
351 }
352 #endif
353
354 static void virt_page_flush(void)
355 {
356 virt_valid_tag++;
357
358 if (virt_valid_tag == 0) {
359 virt_valid_tag = 1;
360 #if TARGET_LONG_BITS > 32
361 virt_page_flush_internal(l1_virt_map, 5);
362 #else
363 {
364 int i, j;
365 VirtPageDesc *p;
366 for(i = 0; i < L1_SIZE; i++) {
367 p = l1_virt_map[i];
368 if (p) {
369 for(j = 0; j < L2_SIZE; j++)
370 p[j].valid_tag = 0;
371 }
372 }
373 }
374 #endif
375 }
376 }
377 #else
378 static void virt_page_flush(void)
379 {
380 }
381 #endif
382
383 void cpu_exec_init(void)
384 {
385 if (!code_gen_ptr) {
386 code_gen_ptr = code_gen_buffer;
387 page_init();
388 io_mem_init();
389 }
390 }
391
392 static inline void invalidate_page_bitmap(PageDesc *p)
393 {
394 if (p->code_bitmap) {
395 qemu_free(p->code_bitmap);
396 p->code_bitmap = NULL;
397 }
398 p->code_write_count = 0;
399 }
400
401 /* set to NULL all the 'first_tb' fields in all PageDescs */
402 static void page_flush_tb(void)
403 {
404 int i, j;
405 PageDesc *p;
406
407 for(i = 0; i < L1_SIZE; i++) {
408 p = l1_map[i];
409 if (p) {
410 for(j = 0; j < L2_SIZE; j++) {
411 p->first_tb = NULL;
412 invalidate_page_bitmap(p);
413 p++;
414 }
415 }
416 }
417 }
418
419 /* flush all the translation blocks */
420 /* XXX: tb_flush is currently not thread safe */
421 void tb_flush(CPUState *env)
422 {
423 #if defined(DEBUG_FLUSH)
424 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
425 code_gen_ptr - code_gen_buffer,
426 nb_tbs,
427 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
428 #endif
429 nb_tbs = 0;
430 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
431 virt_page_flush();
432
433 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
434 page_flush_tb();
435
436 code_gen_ptr = code_gen_buffer;
437 /* XXX: flush processor icache at this point if cache flush is
438 expensive */
439 tb_flush_count++;
440 }
441
442 #ifdef DEBUG_TB_CHECK
443
444 static void tb_invalidate_check(unsigned long address)
445 {
446 TranslationBlock *tb;
447 int i;
448 address &= TARGET_PAGE_MASK;
449 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
450 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
451 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
452 address >= tb->pc + tb->size)) {
453 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
454 address, tb->pc, tb->size);
455 }
456 }
457 }
458 }
459
460 /* verify that all the pages have correct rights for code */
461 static void tb_page_check(void)
462 {
463 TranslationBlock *tb;
464 int i, flags1, flags2;
465
466 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
467 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
468 flags1 = page_get_flags(tb->pc);
469 flags2 = page_get_flags(tb->pc + tb->size - 1);
470 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
471 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
472 tb->pc, tb->size, flags1, flags2);
473 }
474 }
475 }
476 }
477
478 void tb_jmp_check(TranslationBlock *tb)
479 {
480 TranslationBlock *tb1;
481 unsigned int n1;
482
483 /* suppress any remaining jumps to this TB */
484 tb1 = tb->jmp_first;
485 for(;;) {
486 n1 = (long)tb1 & 3;
487 tb1 = (TranslationBlock *)((long)tb1 & ~3);
488 if (n1 == 2)
489 break;
490 tb1 = tb1->jmp_next[n1];
491 }
492 /* check end of list */
493 if (tb1 != tb) {
494 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
495 }
496 }
497
498 #endif
499
500 /* invalidate one TB */
501 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
502 int next_offset)
503 {
504 TranslationBlock *tb1;
505 for(;;) {
506 tb1 = *ptb;
507 if (tb1 == tb) {
508 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
509 break;
510 }
511 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
512 }
513 }
514
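/* The per-page TB lists (page_next[]) and the jump lists (jmp_next[],
   jmp_first) store a small tag in the two low bits of each pointer: the
   tag selects which of the two per-TB link slots (0 or 1) the next
   element is chained through, and the value 2 marks the head of the
   circular jump list (a TB's jmp_first points back to itself with tag 2,
   see tb_link()). This is why the code below keeps masking with '& 3'
   and '& ~3'. */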
515 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
516 {
517 TranslationBlock *tb1;
518 unsigned int n1;
519
520 for(;;) {
521 tb1 = *ptb;
522 n1 = (long)tb1 & 3;
523 tb1 = (TranslationBlock *)((long)tb1 & ~3);
524 if (tb1 == tb) {
525 *ptb = tb1->page_next[n1];
526 break;
527 }
528 ptb = &tb1->page_next[n1];
529 }
530 }
531
532 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
533 {
534 TranslationBlock *tb1, **ptb;
535 unsigned int n1;
536
537 ptb = &tb->jmp_next[n];
538 tb1 = *ptb;
539 if (tb1) {
540 /* find tb(n) in circular list */
541 for(;;) {
542 tb1 = *ptb;
543 n1 = (long)tb1 & 3;
544 tb1 = (TranslationBlock *)((long)tb1 & ~3);
545 if (n1 == n && tb1 == tb)
546 break;
547 if (n1 == 2) {
548 ptb = &tb1->jmp_first;
549 } else {
550 ptb = &tb1->jmp_next[n1];
551 }
552 }
553 /* now we can suppress tb(n) from the list */
554 *ptb = tb->jmp_next[n];
555
556 tb->jmp_next[n] = NULL;
557 }
558 }
559
560 /* reset the jump entry 'n' of a TB so that it is not chained to
561 another TB */
562 static inline void tb_reset_jump(TranslationBlock *tb, int n)
563 {
564 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
565 }
566
567 static inline void tb_invalidate(TranslationBlock *tb)
568 {
569 unsigned int h, n1;
570 TranslationBlock *tb1, *tb2, **ptb;
571
572 tb_invalidated_flag = 1;
573
574 /* remove the TB from the hash list */
575 h = tb_hash_func(tb->pc);
576 ptb = &tb_hash[h];
577 for(;;) {
578 tb1 = *ptb;
579 /* NOTE: the TB is not necessarily linked in the hash; if it is
580 not, it simply means it is not currently in use */
581 if (tb1 == NULL)
582 return;
583 if (tb1 == tb) {
584 *ptb = tb1->hash_next;
585 break;
586 }
587 ptb = &tb1->hash_next;
588 }
589
590 /* suppress this TB from the two jump lists */
591 tb_jmp_remove(tb, 0);
592 tb_jmp_remove(tb, 1);
593
594 /* suppress any remaining jumps to this TB */
595 tb1 = tb->jmp_first;
596 for(;;) {
597 n1 = (long)tb1 & 3;
598 if (n1 == 2)
599 break;
600 tb1 = (TranslationBlock *)((long)tb1 & ~3);
601 tb2 = tb1->jmp_next[n1];
602 tb_reset_jump(tb1, n1);
603 tb1->jmp_next[n1] = NULL;
604 tb1 = tb2;
605 }
606 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
607 }
608
609 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
610 {
611 PageDesc *p;
612 unsigned int h;
613 target_ulong phys_pc;
614
615 /* remove the TB from the hash list */
616 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
617 h = tb_phys_hash_func(phys_pc);
618 tb_remove(&tb_phys_hash[h], tb,
619 offsetof(TranslationBlock, phys_hash_next));
620
621 /* remove the TB from the page list */
622 if (tb->page_addr[0] != page_addr) {
623 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
624 tb_page_remove(&p->first_tb, tb);
625 invalidate_page_bitmap(p);
626 }
627 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
628 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
629 tb_page_remove(&p->first_tb, tb);
630 invalidate_page_bitmap(p);
631 }
632
633 tb_invalidate(tb);
634 tb_phys_invalidate_count++;
635 }
636
637 static inline void set_bits(uint8_t *tab, int start, int len)
638 {
639 int end, mask, end1;
640
641 end = start + len;
642 tab += start >> 3;
643 mask = 0xff << (start & 7);
644 if ((start & ~7) == (end & ~7)) {
645 if (start < end) {
646 mask &= ~(0xff << (end & 7));
647 *tab |= mask;
648 }
649 } else {
650 *tab++ |= mask;
651 start = (start + 8) & ~7;
652 end1 = end & ~7;
653 while (start < end1) {
654 *tab++ = 0xff;
655 start += 8;
656 }
657 if (start < end) {
658 mask = ~(0xff << (end & 7));
659 *tab |= mask;
660 }
661 }
662 }
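/* Example: set_bits(bitmap, 5, 7) marks bits 5..11. The range crosses a
   byte boundary, so the first byte is OR'ed with mask 0xe0 (bits 5..7)
   and the last byte with mask 0x0f (bits 8..11); any full bytes in
   between would be set to 0xff. In the code bitmap below, bit N set
   means "byte N of the page is covered by translated code". */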
663
664 static void build_page_bitmap(PageDesc *p)
665 {
666 int n, tb_start, tb_end;
667 TranslationBlock *tb;
668
669 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
670 if (!p->code_bitmap)
671 return;
672 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
673
674 tb = p->first_tb;
675 while (tb != NULL) {
676 n = (long)tb & 3;
677 tb = (TranslationBlock *)((long)tb & ~3);
678 /* NOTE: this is subtle as a TB may span two physical pages */
679 if (n == 0) {
680 /* NOTE: tb_end may be after the end of the page, but
681 it is not a problem */
682 tb_start = tb->pc & ~TARGET_PAGE_MASK;
683 tb_end = tb_start + tb->size;
684 if (tb_end > TARGET_PAGE_SIZE)
685 tb_end = TARGET_PAGE_SIZE;
686 } else {
687 tb_start = 0;
688 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
689 }
690 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
691 tb = tb->page_next[n];
692 }
693 }
694
695 #ifdef TARGET_HAS_PRECISE_SMC
696
697 static void tb_gen_code(CPUState *env,
698 target_ulong pc, target_ulong cs_base, int flags,
699 int cflags)
700 {
701 TranslationBlock *tb;
702 uint8_t *tc_ptr;
703 target_ulong phys_pc, phys_page2, virt_page2;
704 int code_gen_size;
705
706 phys_pc = get_phys_addr_code(env, pc);
707 tb = tb_alloc(pc);
708 if (!tb) {
709 /* flush must be done */
710 tb_flush(env);
711 /* cannot fail at this point */
712 tb = tb_alloc(pc);
713 }
714 tc_ptr = code_gen_ptr;
715 tb->tc_ptr = tc_ptr;
716 tb->cs_base = cs_base;
717 tb->flags = flags;
718 tb->cflags = cflags;
719 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
720 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
721
722 /* check next page if needed */
723 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
724 phys_page2 = -1;
725 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
726 phys_page2 = get_phys_addr_code(env, virt_page2);
727 }
728 tb_link_phys(tb, phys_pc, phys_page2);
729 }
730 #endif
731
732 /* invalidate all TBs which intersect with the target physical address
733 range [start, end[. NOTE: start and end must refer to
734 the same physical page. 'is_cpu_write_access' should be true if called
735 from a real cpu write access: the virtual CPU will exit the current
736 TB if code is modified inside this TB. */
737 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
738 int is_cpu_write_access)
739 {
740 int n, current_tb_modified, current_tb_not_found, current_flags;
741 CPUState *env = cpu_single_env;
742 PageDesc *p;
743 TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
744 target_ulong tb_start, tb_end;
745 target_ulong current_pc, current_cs_base;
746
747 p = page_find(start >> TARGET_PAGE_BITS);
748 if (!p)
749 return;
750 if (!p->code_bitmap &&
751 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
752 is_cpu_write_access) {
753 /* build code bitmap */
754 build_page_bitmap(p);
755 }
756
757 /* we remove all the TBs in the range [start, end[ */
758 /* XXX: see if in some cases it could be faster to invalidate all the code */
759 current_tb_not_found = is_cpu_write_access;
760 current_tb_modified = 0;
761 current_tb = NULL; /* avoid warning */
762 current_pc = 0; /* avoid warning */
763 current_cs_base = 0; /* avoid warning */
764 current_flags = 0; /* avoid warning */
765 tb = p->first_tb;
766 while (tb != NULL) {
767 n = (long)tb & 3;
768 tb = (TranslationBlock *)((long)tb & ~3);
769 tb_next = tb->page_next[n];
770 /* NOTE: this is subtle as a TB may span two physical pages */
771 if (n == 0) {
772 /* NOTE: tb_end may be after the end of the page, but
773 it is not a problem */
774 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
775 tb_end = tb_start + tb->size;
776 } else {
777 tb_start = tb->page_addr[1];
778 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
779 }
780 if (!(tb_end <= start || tb_start >= end)) {
781 #ifdef TARGET_HAS_PRECISE_SMC
782 if (current_tb_not_found) {
783 current_tb_not_found = 0;
784 current_tb = NULL;
785 if (env->mem_write_pc) {
786 /* now we have a real cpu fault */
787 current_tb = tb_find_pc(env->mem_write_pc);
788 }
789 }
790 if (current_tb == tb &&
791 !(current_tb->cflags & CF_SINGLE_INSN)) {
792 /* If we are modifying the current TB, we must stop
793 its execution. We could be more precise by checking
794 that the modification is after the current PC, but it
795 would require a specialized function to partially
796 restore the CPU state */
797
798 current_tb_modified = 1;
799 cpu_restore_state(current_tb, env,
800 env->mem_write_pc, NULL);
801 #if defined(TARGET_I386)
802 current_flags = env->hflags;
803 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
804 current_cs_base = (target_ulong)env->segs[R_CS].base;
805 current_pc = current_cs_base + env->eip;
806 #else
807 #error unsupported CPU
808 #endif
809 }
810 #endif /* TARGET_HAS_PRECISE_SMC */
811 saved_tb = env->current_tb;
812 env->current_tb = NULL;
813 tb_phys_invalidate(tb, -1);
814 env->current_tb = saved_tb;
815 if (env->interrupt_request && env->current_tb)
816 cpu_interrupt(env, env->interrupt_request);
817 }
818 tb = tb_next;
819 }
820 #if !defined(CONFIG_USER_ONLY)
821 /* if no code remaining, no need to continue to use slow writes */
822 if (!p->first_tb) {
823 invalidate_page_bitmap(p);
824 if (is_cpu_write_access) {
825 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
826 }
827 }
828 #endif
829 #ifdef TARGET_HAS_PRECISE_SMC
830 if (current_tb_modified) {
831 /* we generate a block containing just the instruction
832 modifying the memory. It will ensure that it cannot modify
833 itself */
834 env->current_tb = NULL;
835 tb_gen_code(env, current_pc, current_cs_base, current_flags,
836 CF_SINGLE_INSN);
837 cpu_resume_from_signal(env, NULL);
838 }
839 #endif
840 }
841
842 /* len must be <= 8 and start must be a multiple of len */
843 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
844 {
845 PageDesc *p;
846 int offset, b;
847 #if 0
848 if (1) {
849 if (loglevel) {
850 fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
851 cpu_single_env->mem_write_vaddr, len,
852 cpu_single_env->eip,
853 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
854 }
855 }
856 #endif
857 p = page_find(start >> TARGET_PAGE_BITS);
858 if (!p)
859 return;
860 if (p->code_bitmap) {
861 offset = start & ~TARGET_PAGE_MASK;
862 b = p->code_bitmap[offset >> 3] >> (offset & 7);
863 if (b & ((1 << len) - 1))
864 goto do_invalidate;
865 } else {
866 do_invalidate:
867 tb_invalidate_phys_page_range(start, start + len, 1);
868 }
869 }
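/* This fast path is called from the code_mem_write* handlers below with
   len equal to the access size (1, 2 or 4 bytes). When a code bitmap
   exists for the page, a write that does not overlap any translated code
   is filtered out with a single bit test instead of walking the TB list. */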
870
871 #if !defined(CONFIG_SOFTMMU)
872 static void tb_invalidate_phys_page(target_ulong addr,
873 unsigned long pc, void *puc)
874 {
875 int n, current_flags, current_tb_modified;
876 target_ulong current_pc, current_cs_base;
877 PageDesc *p;
878 TranslationBlock *tb, *current_tb;
879 #ifdef TARGET_HAS_PRECISE_SMC
880 CPUState *env = cpu_single_env;
881 #endif
882
883 addr &= TARGET_PAGE_MASK;
884 p = page_find(addr >> TARGET_PAGE_BITS);
885 if (!p)
886 return;
887 tb = p->first_tb;
888 current_tb_modified = 0;
889 current_tb = NULL;
890 current_pc = 0; /* avoid warning */
891 current_cs_base = 0; /* avoid warning */
892 current_flags = 0; /* avoid warning */
893 #ifdef TARGET_HAS_PRECISE_SMC
894 if (tb && pc != 0) {
895 current_tb = tb_find_pc(pc);
896 }
897 #endif
898 while (tb != NULL) {
899 n = (long)tb & 3;
900 tb = (TranslationBlock *)((long)tb & ~3);
901 #ifdef TARGET_HAS_PRECISE_SMC
902 if (current_tb == tb &&
903 !(current_tb->cflags & CF_SINGLE_INSN)) {
904 /* If we are modifying the current TB, we must stop
905 its execution. We could be more precise by checking
906 that the modification is after the current PC, but it
907 would require a specialized function to partially
908 restore the CPU state */
909
910 current_tb_modified = 1;
911 cpu_restore_state(current_tb, env, pc, puc);
912 #if defined(TARGET_I386)
913 current_flags = env->hflags;
914 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
915 current_cs_base = (target_ulong)env->segs[R_CS].base;
916 current_pc = current_cs_base + env->eip;
917 #else
918 #error unsupported CPU
919 #endif
920 }
921 #endif /* TARGET_HAS_PRECISE_SMC */
922 tb_phys_invalidate(tb, addr);
923 tb = tb->page_next[n];
924 }
925 p->first_tb = NULL;
926 #ifdef TARGET_HAS_PRECISE_SMC
927 if (current_tb_modified) {
928 /* we generate a block containing just the instruction
929 modifying the memory. It will ensure that it cannot modify
930 itself */
931 env->current_tb = NULL;
932 tb_gen_code(env, current_pc, current_cs_base, current_flags,
933 CF_SINGLE_INSN);
934 cpu_resume_from_signal(env, puc);
935 }
936 #endif
937 }
938 #endif
939
940 /* add the tb in the target page and protect it if necessary */
941 static inline void tb_alloc_page(TranslationBlock *tb,
942 unsigned int n, unsigned int page_addr)
943 {
944 PageDesc *p;
945 TranslationBlock *last_first_tb;
946
947 tb->page_addr[n] = page_addr;
948 p = page_find(page_addr >> TARGET_PAGE_BITS);
949 tb->page_next[n] = p->first_tb;
950 last_first_tb = p->first_tb;
951 p->first_tb = (TranslationBlock *)((long)tb | n);
952 invalidate_page_bitmap(p);
953
954 #if defined(TARGET_HAS_SMC) || 1
955
956 #if defined(CONFIG_USER_ONLY)
957 if (p->flags & PAGE_WRITE) {
958 unsigned long host_start, host_end, addr;
959 int prot;
960
961 /* force the host page to be non writable (writes will have a
962 page fault + mprotect overhead) */
963 host_start = page_addr & qemu_host_page_mask;
964 host_end = host_start + qemu_host_page_size;
965 prot = 0;
966 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
967 prot |= page_get_flags(addr);
968 mprotect((void *)host_start, qemu_host_page_size,
969 (prot & PAGE_BITS) & ~PAGE_WRITE);
970 #ifdef DEBUG_TB_INVALIDATE
971 printf("protecting code page: 0x%08lx\n",
972 host_start);
973 #endif
974 p->flags &= ~PAGE_WRITE;
975 }
976 #else
977 /* if some code is already present, then the pages are already
978 protected. So we handle the case where only the first TB is
979 allocated in a physical page */
980 if (!last_first_tb) {
981 target_ulong virt_addr;
982
983 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
984 tlb_protect_code(cpu_single_env, virt_addr);
985 }
986 #endif
987
988 #endif /* TARGET_HAS_SMC */
989 }
990
991 /* Allocate a new translation block. Returns NULL if there are too many
992 translation blocks or too much generated code; the caller must then flush. */
993 TranslationBlock *tb_alloc(target_ulong pc)
994 {
995 TranslationBlock *tb;
996
997 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
998 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
999 return NULL;
1000 tb = &tbs[nb_tbs++];
1001 tb->pc = pc;
1002 tb->cflags = 0;
1003 return tb;
1004 }
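/* Typical allocation pattern (a sketch of what tb_gen_code() above does):
   if the pool is exhausted, flush everything and retry; the second
   tb_alloc() cannot fail. */
#if 0
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
#endif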
1005
1006 /* add a new TB and link it to the physical page tables. phys_page2 is
1007 (-1) to indicate that only one page contains the TB. */
1008 void tb_link_phys(TranslationBlock *tb,
1009 target_ulong phys_pc, target_ulong phys_page2)
1010 {
1011 unsigned int h;
1012 TranslationBlock **ptb;
1013
1014 /* add in the physical hash table */
1015 h = tb_phys_hash_func(phys_pc);
1016 ptb = &tb_phys_hash[h];
1017 tb->phys_hash_next = *ptb;
1018 *ptb = tb;
1019
1020 /* add in the page list */
1021 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1022 if (phys_page2 != -1)
1023 tb_alloc_page(tb, 1, phys_page2);
1024 else
1025 tb->page_addr[1] = -1;
1026 #ifdef DEBUG_TB_CHECK
1027 tb_page_check();
1028 #endif
1029 }
1030
1031 /* link the tb with the other TBs */
1032 void tb_link(TranslationBlock *tb)
1033 {
1034 #if !defined(CONFIG_USER_ONLY)
1035 {
1036 VirtPageDesc *vp;
1037 target_ulong addr;
1038
1039 /* save the code memory mappings (needed to invalidate the code) */
1040 addr = tb->pc & TARGET_PAGE_MASK;
1041 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1042 #ifdef DEBUG_TLB_CHECK
1043 if (vp->valid_tag == virt_valid_tag &&
1044 vp->phys_addr != tb->page_addr[0]) {
1045 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1046 addr, tb->page_addr[0], vp->phys_addr);
1047 }
1048 #endif
1049 vp->phys_addr = tb->page_addr[0];
1050 if (vp->valid_tag != virt_valid_tag) {
1051 vp->valid_tag = virt_valid_tag;
1052 #if !defined(CONFIG_SOFTMMU)
1053 vp->prot = 0;
1054 #endif
1055 }
1056
1057 if (tb->page_addr[1] != -1) {
1058 addr += TARGET_PAGE_SIZE;
1059 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1060 #ifdef DEBUG_TLB_CHECK
1061 if (vp->valid_tag == virt_valid_tag &&
1062 vp->phys_addr != tb->page_addr[1]) {
1063 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
1064 addr, tb->page_addr[1], vp->phys_addr);
1065 }
1066 #endif
1067 vp->phys_addr = tb->page_addr[1];
1068 if (vp->valid_tag != virt_valid_tag) {
1069 vp->valid_tag = virt_valid_tag;
1070 #if !defined(CONFIG_SOFTMMU)
1071 vp->prot = 0;
1072 #endif
1073 }
1074 }
1075 }
1076 #endif
1077
1078 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1079 tb->jmp_next[0] = NULL;
1080 tb->jmp_next[1] = NULL;
1081 #ifdef USE_CODE_COPY
1082 tb->cflags &= ~CF_FP_USED;
1083 if (tb->cflags & CF_TB_FP_USED)
1084 tb->cflags |= CF_FP_USED;
1085 #endif
1086
1087 /* init original jump addresses */
1088 if (tb->tb_next_offset[0] != 0xffff)
1089 tb_reset_jump(tb, 0);
1090 if (tb->tb_next_offset[1] != 0xffff)
1091 tb_reset_jump(tb, 1);
1092 }
1093
1094 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1095 tb[1].tc_ptr. Return NULL if not found */
1096 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1097 {
1098 int m_min, m_max, m;
1099 unsigned long v;
1100 TranslationBlock *tb;
1101
1102 if (nb_tbs <= 0)
1103 return NULL;
1104 if (tc_ptr < (unsigned long)code_gen_buffer ||
1105 tc_ptr >= (unsigned long)code_gen_ptr)
1106 return NULL;
1107 /* binary search (cf Knuth) */
1108 m_min = 0;
1109 m_max = nb_tbs - 1;
1110 while (m_min <= m_max) {
1111 m = (m_min + m_max) >> 1;
1112 tb = &tbs[m];
1113 v = (unsigned long)tb->tc_ptr;
1114 if (v == tc_ptr)
1115 return tb;
1116 else if (tc_ptr < v) {
1117 m_max = m - 1;
1118 } else {
1119 m_min = m + 1;
1120 }
1121 }
1122 return &tbs[m_max];
1123 }
1124
1125 static void tb_reset_jump_recursive(TranslationBlock *tb);
1126
1127 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1128 {
1129 TranslationBlock *tb1, *tb_next, **ptb;
1130 unsigned int n1;
1131
1132 tb1 = tb->jmp_next[n];
1133 if (tb1 != NULL) {
1134 /* find head of list */
1135 for(;;) {
1136 n1 = (long)tb1 & 3;
1137 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1138 if (n1 == 2)
1139 break;
1140 tb1 = tb1->jmp_next[n1];
1141 }
1142 /* we are now sure that tb jumps to tb1 */
1143 tb_next = tb1;
1144
1145 /* remove tb from the jmp_first list */
1146 ptb = &tb_next->jmp_first;
1147 for(;;) {
1148 tb1 = *ptb;
1149 n1 = (long)tb1 & 3;
1150 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1151 if (n1 == n && tb1 == tb)
1152 break;
1153 ptb = &tb1->jmp_next[n1];
1154 }
1155 *ptb = tb->jmp_next[n];
1156 tb->jmp_next[n] = NULL;
1157
1158 /* suppress the jump to next tb in generated code */
1159 tb_reset_jump(tb, n);
1160
1161 /* suppress jumps in the tb on which we could have jumped */
1162 tb_reset_jump_recursive(tb_next);
1163 }
1164 }
1165
1166 static void tb_reset_jump_recursive(TranslationBlock *tb)
1167 {
1168 tb_reset_jump_recursive2(tb, 0);
1169 tb_reset_jump_recursive2(tb, 1);
1170 }
1171
1172 #if defined(TARGET_HAS_ICE)
1173 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1174 {
1175 target_ulong phys_addr;
1176
1177 phys_addr = cpu_get_phys_page_debug(env, pc);
1178 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1179 }
1180 #endif
1181
1182 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1183 breakpoint is reached */
1184 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1185 {
1186 #if defined(TARGET_HAS_ICE)
1187 int i;
1188
1189 for(i = 0; i < env->nb_breakpoints; i++) {
1190 if (env->breakpoints[i] == pc)
1191 return 0;
1192 }
1193
1194 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1195 return -1;
1196 env->breakpoints[env->nb_breakpoints++] = pc;
1197
1198 breakpoint_invalidate(env, pc);
1199 return 0;
1200 #else
1201 return -1;
1202 #endif
1203 }
1204
1205 /* remove a breakpoint */
1206 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1207 {
1208 #if defined(TARGET_HAS_ICE)
1209 int i;
1210 for(i = 0; i < env->nb_breakpoints; i++) {
1211 if (env->breakpoints[i] == pc)
1212 goto found;
1213 }
1214 return -1;
1215 found:
1216 env->nb_breakpoints--;
1217 if (i < env->nb_breakpoints)
1218 env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1219
1220 breakpoint_invalidate(env, pc);
1221 return 0;
1222 #else
1223 return -1;
1224 #endif
1225 }
1226
1227 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1228 CPU loop after each instruction */
1229 void cpu_single_step(CPUState *env, int enabled)
1230 {
1231 #if defined(TARGET_HAS_ICE)
1232 if (env->singlestep_enabled != enabled) {
1233 env->singlestep_enabled = enabled;
1234 /* must flush all the translated code to avoid inconsistencies */
1235 /* XXX: only flush what is necessary */
1236 tb_flush(env);
1237 }
1238 #endif
1239 }
1240
1241 /* enable or disable low level logging */
1242 void cpu_set_log(int log_flags)
1243 {
1244 loglevel = log_flags;
1245 if (loglevel && !logfile) {
1246 logfile = fopen(logfilename, "w");
1247 if (!logfile) {
1248 perror(logfilename);
1249 _exit(1);
1250 }
1251 #if !defined(CONFIG_SOFTMMU)
1252 /* avoid glibc allocating the stream buffer with mmap() by providing one "by hand" */
1253 {
1254 static uint8_t logfile_buf[4096];
1255 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1256 }
1257 #else
1258 setvbuf(logfile, NULL, _IOLBF, 0);
1259 #endif
1260 }
1261 }
1262
1263 void cpu_set_log_filename(const char *filename)
1264 {
1265 logfilename = strdup(filename);
1266 }
1267
1268 /* mask must never be zero, except for A20 change call */
1269 void cpu_interrupt(CPUState *env, int mask)
1270 {
1271 TranslationBlock *tb;
1272 static int interrupt_lock;
1273
1274 env->interrupt_request |= mask;
1275 /* if the cpu is currently executing code, we must unlink it and
1276 all the potentially executing TBs */
1277 tb = env->current_tb;
1278 if (tb && !testandset(&interrupt_lock)) {
1279 env->current_tb = NULL;
1280 tb_reset_jump_recursive(tb);
1281 interrupt_lock = 0;
1282 }
1283 }
1284
1285 void cpu_reset_interrupt(CPUState *env, int mask)
1286 {
1287 env->interrupt_request &= ~mask;
1288 }
1289
1290 CPULogItem cpu_log_items[] = {
1291 { CPU_LOG_TB_OUT_ASM, "out_asm",
1292 "show generated host assembly code for each compiled TB" },
1293 { CPU_LOG_TB_IN_ASM, "in_asm",
1294 "show target assembly code for each compiled TB" },
1295 { CPU_LOG_TB_OP, "op",
1296 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1297 #ifdef TARGET_I386
1298 { CPU_LOG_TB_OP_OPT, "op_opt",
1299 "show micro ops after optimization for each compiled TB" },
1300 #endif
1301 { CPU_LOG_INT, "int",
1302 "show interrupts/exceptions in short format" },
1303 { CPU_LOG_EXEC, "exec",
1304 "show trace before each executed TB (lots of logs)" },
1305 { CPU_LOG_TB_CPU, "cpu",
1306 "show CPU state before bloc translation" },
1307 #ifdef TARGET_I386
1308 { CPU_LOG_PCALL, "pcall",
1309 "show protected mode far calls/returns/exceptions" },
1310 #endif
1311 #ifdef DEBUG_IOPORT
1312 { CPU_LOG_IOPORT, "ioport",
1313 "show all i/o ports accesses" },
1314 #endif
1315 { 0, NULL, NULL },
1316 };
1317
1318 static int cmp1(const char *s1, int n, const char *s2)
1319 {
1320 if (strlen(s2) != n)
1321 return 0;
1322 return memcmp(s1, s2, n) == 0;
1323 }
1324
1325 /* takes a comma separated list of log masks. Returns 0 on error. */
1326 int cpu_str_to_log_mask(const char *str)
1327 {
1328 CPULogItem *item;
1329 int mask;
1330 const char *p, *p1;
1331
1332 p = str;
1333 mask = 0;
1334 for(;;) {
1335 p1 = strchr(p, ',');
1336 if (!p1)
1337 p1 = p + strlen(p);
1338 if(cmp1(p,p1-p,"all")) {
1339 for(item = cpu_log_items; item->mask != 0; item++) {
1340 mask |= item->mask;
1341 }
1342 } else {
1343 for(item = cpu_log_items; item->mask != 0; item++) {
1344 if (cmp1(p, p1 - p, item->name))
1345 goto found;
1346 }
1347 return 0;
1348 }
1349 found:
1350 mask |= item->mask;
1351 if (*p1 != ',')
1352 break;
1353 p = p1 + 1;
1354 }
1355 return mask;
1356 }
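/* Illustrative use of the logging helpers (for instance, this is roughly
   what a command line debug option could translate into; the option
   string is just an example): */
#if 0
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (!mask) {
        /* unknown log item name */
    } else {
        cpu_set_log(mask);
    }
#endif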
1357
1358 void cpu_abort(CPUState *env, const char *fmt, ...)
1359 {
1360 va_list ap;
1361
1362 va_start(ap, fmt);
1363 fprintf(stderr, "qemu: fatal: ");
1364 vfprintf(stderr, fmt, ap);
1365 fprintf(stderr, "\n");
1366 #ifdef TARGET_I386
1367 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1368 #else
1369 cpu_dump_state(env, stderr, fprintf, 0);
1370 #endif
1371 va_end(ap);
1372 abort();
1373 }
1374
1375 #if !defined(CONFIG_USER_ONLY)
1376
1377 /* NOTE: if flush_global is true, also flush global entries (not
1378 implemented yet) */
1379 void tlb_flush(CPUState *env, int flush_global)
1380 {
1381 int i;
1382
1383 #if defined(DEBUG_TLB)
1384 printf("tlb_flush:\n");
1385 #endif
1386 /* must reset current TB so that interrupts cannot modify the
1387 links while we are modifying them */
1388 env->current_tb = NULL;
1389
1390 for(i = 0; i < CPU_TLB_SIZE; i++) {
1391 env->tlb_read[0][i].address = -1;
1392 env->tlb_write[0][i].address = -1;
1393 env->tlb_read[1][i].address = -1;
1394 env->tlb_write[1][i].address = -1;
1395 }
1396
1397 virt_page_flush();
1398 memset (tb_hash, 0, CODE_GEN_HASH_SIZE * sizeof (void *));
1399
1400 #if !defined(CONFIG_SOFTMMU)
1401 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1402 #endif
1403 #ifdef USE_KQEMU
1404 if (env->kqemu_enabled) {
1405 kqemu_flush(env, flush_global);
1406 }
1407 #endif
1408 tlb_flush_count++;
1409 }
1410
1411 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1412 {
1413 if (addr == (tlb_entry->address &
1414 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1415 tlb_entry->address = -1;
1416 }
1417
1418 void tlb_flush_page(CPUState *env, target_ulong addr)
1419 {
1420 int i, n;
1421 VirtPageDesc *vp;
1422 PageDesc *p;
1423 TranslationBlock *tb;
1424
1425 #if defined(DEBUG_TLB)
1426 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1427 #endif
1428 /* must reset current TB so that interrupts cannot modify the
1429 links while we are modifying them */
1430 env->current_tb = NULL;
1431
1432 addr &= TARGET_PAGE_MASK;
1433 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1434 tlb_flush_entry(&env->tlb_read[0][i], addr);
1435 tlb_flush_entry(&env->tlb_write[0][i], addr);
1436 tlb_flush_entry(&env->tlb_read[1][i], addr);
1437 tlb_flush_entry(&env->tlb_write[1][i], addr);
1438
1439 /* remove from the virtual pc hash table all the TBs at this
1440 virtual address */
1441
1442 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1443 if (vp && vp->valid_tag == virt_valid_tag) {
1444 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1445 if (p) {
1446 /* we remove all the links to the TBs in this virtual page */
1447 tb = p->first_tb;
1448 while (tb != NULL) {
1449 n = (long)tb & 3;
1450 tb = (TranslationBlock *)((long)tb & ~3);
1451 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1452 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1453 tb_invalidate(tb);
1454 }
1455 tb = tb->page_next[n];
1456 }
1457 }
1458 vp->valid_tag = 0;
1459 }
1460
1461 #if !defined(CONFIG_SOFTMMU)
1462 if (addr < MMAP_AREA_END)
1463 munmap((void *)addr, TARGET_PAGE_SIZE);
1464 #endif
1465 #ifdef USE_KQEMU
1466 if (env->kqemu_enabled) {
1467 kqemu_flush_page(env, addr);
1468 }
1469 #endif
1470 }
1471
1472 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1473 {
1474 if (addr == (tlb_entry->address &
1475 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1476 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1477 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1478 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1479 }
1480 }
1481
1482 /* update the TLBs so that writes to code in the virtual page 'addr'
1483 can be detected */
1484 static void tlb_protect_code(CPUState *env, target_ulong addr)
1485 {
1486 int i;
1487
1488 addr &= TARGET_PAGE_MASK;
1489 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1490 tlb_protect_code1(&env->tlb_write[0][i], addr);
1491 tlb_protect_code1(&env->tlb_write[1][i], addr);
1492 #if !defined(CONFIG_SOFTMMU)
1493 /* NOTE: as we generated the code for this page, it is already at
1494 least readable */
1495 if (addr < MMAP_AREA_END)
1496 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1497 #endif
1498 }
1499
1500 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1501 unsigned long phys_addr)
1502 {
1503 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1504 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1505 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1506 }
1507 }
1508
1509 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1510 tested for self modifying code */
1511 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1512 {
1513 int i;
1514
1515 phys_addr &= TARGET_PAGE_MASK;
1516 phys_addr += (long)phys_ram_base;
1517 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1518 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1519 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1520 }
1521
1522 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1523 unsigned long start, unsigned long length)
1524 {
1525 unsigned long addr;
1526 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1527 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1528 if ((addr - start) < length) {
1529 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1530 }
1531 }
1532 }
1533
1534 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end,
1535 int dirty_flags)
1536 {
1537 CPUState *env;
1538 unsigned long length, start1;
1539 int i, mask, len;
1540 uint8_t *p;
1541
1542 start &= TARGET_PAGE_MASK;
1543 end = TARGET_PAGE_ALIGN(end);
1544
1545 length = end - start;
1546 if (length == 0)
1547 return;
1548 mask = ~dirty_flags;
1549 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1550 len = length >> TARGET_PAGE_BITS;
1551 for(i = 0; i < len; i++)
1552 p[i] &= mask;
1553
1554 env = cpu_single_env;
1555 /* we modify the TLB cache so that the dirty bit will be set again
1556 when accessing the range */
1557 start1 = start + (unsigned long)phys_ram_base;
1558 for(i = 0; i < CPU_TLB_SIZE; i++)
1559 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1560 for(i = 0; i < CPU_TLB_SIZE; i++)
1561 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1562
1563 #if !defined(CONFIG_SOFTMMU)
1564 /* XXX: this is expensive */
1565 {
1566 VirtPageDesc *p;
1567 int j;
1568 target_ulong addr;
1569
1570 for(i = 0; i < L1_SIZE; i++) {
1571 p = l1_virt_map[i];
1572 if (p) {
1573 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1574 for(j = 0; j < L2_SIZE; j++) {
1575 if (p->valid_tag == virt_valid_tag &&
1576 p->phys_addr >= start && p->phys_addr < end &&
1577 (p->prot & PROT_WRITE)) {
1578 if (addr < MMAP_AREA_END) {
1579 mprotect((void *)addr, TARGET_PAGE_SIZE,
1580 p->prot & ~PROT_WRITE);
1581 }
1582 }
1583 addr += TARGET_PAGE_SIZE;
1584 p++;
1585 }
1586 }
1587 }
1588 }
1589 #endif
1590 }
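/* Overview of the dirty tracking round trip: after the dirty flags of a
   range are cleared above, the matching TLB write entries are downgraded
   to IO_MEM_NOTDIRTY, so the next guest write to such a page goes through
   notdirty_mem_write*() below; those handlers perform the store and then
   call tlb_set_dirty(), which marks the page dirty again and restores the
   fast IO_MEM_RAM mapping for the entry. */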
1591
1592 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1593 unsigned long start)
1594 {
1595 unsigned long addr;
1596 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1597 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1598 if (addr == start) {
1599 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1600 }
1601 }
1602 }
1603
1604 /* update the TLB corresponding to virtual page vaddr and phys addr
1605 addr so that it is no longer dirty */
1606 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1607 {
1608 CPUState *env = cpu_single_env;
1609 int i;
1610
1611 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 0xff;
1612
1613 addr &= TARGET_PAGE_MASK;
1614 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1615 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1616 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1617 }
1618
1619 /* add a new TLB entry. At most one entry for a given virtual address
1620 is permitted. Return 0 if OK or 2 if the page could not be mapped
1621 (can only happen in non SOFTMMU mode for I/O pages or pages
1622 conflicting with the host address space). */
1623 int tlb_set_page(CPUState *env, target_ulong vaddr,
1624 target_phys_addr_t paddr, int prot,
1625 int is_user, int is_softmmu)
1626 {
1627 PhysPageDesc *p;
1628 unsigned long pd;
1629 TranslationBlock *first_tb;
1630 unsigned int index;
1631 target_ulong address;
1632 target_phys_addr_t addend;
1633 int ret;
1634
1635 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1636 first_tb = NULL;
1637 if (!p) {
1638 pd = IO_MEM_UNASSIGNED;
1639 } else {
1640 PageDesc *p1;
1641 pd = p->phys_offset;
1642 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1643 /* NOTE: we also allocate the page at this stage */
1644 p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
1645 first_tb = p1->first_tb;
1646 }
1647 }
1648 #if defined(DEBUG_TLB)
1649 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1650 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1651 #endif
1652
1653 ret = 0;
1654 #if !defined(CONFIG_SOFTMMU)
1655 if (is_softmmu)
1656 #endif
1657 {
1658 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1659 /* IO memory case */
1660 address = vaddr | pd;
1661 addend = paddr;
1662 } else {
1663 /* standard memory */
1664 address = vaddr;
1665 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1666 }
1667
1668 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1669 addend -= vaddr;
1670 if (prot & PAGE_READ) {
1671 env->tlb_read[is_user][index].address = address;
1672 env->tlb_read[is_user][index].addend = addend;
1673 } else {
1674 env->tlb_read[is_user][index].address = -1;
1675 env->tlb_read[is_user][index].addend = -1;
1676 }
1677 if (prot & PAGE_WRITE) {
1678 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1679 /* ROM: access is ignored (same as unassigned) */
1680 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1681 env->tlb_write[is_user][index].addend = addend;
1682 } else
1683 /* XXX: the PowerPC code seems not ready to handle
1684 self modifying code with DCBI */
1685 #if defined(TARGET_HAS_SMC) || 1
1686 if (first_tb) {
1687 /* if code is present, we use a specific memory
1688 handler. It works only for physical memory access */
1689 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1690 env->tlb_write[is_user][index].addend = addend;
1691 } else
1692 #endif
1693 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1694 !cpu_physical_memory_is_dirty(pd)) {
1695 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1696 env->tlb_write[is_user][index].addend = addend;
1697 } else {
1698 env->tlb_write[is_user][index].address = address;
1699 env->tlb_write[is_user][index].addend = addend;
1700 }
1701 } else {
1702 env->tlb_write[is_user][index].address = -1;
1703 env->tlb_write[is_user][index].addend = -1;
1704 }
1705 }
1706 #if !defined(CONFIG_SOFTMMU)
1707 else {
1708 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1709 /* IO access: no mapping is done as it will be handled by the
1710 soft MMU */
1711 if (!(env->hflags & HF_SOFTMMU_MASK))
1712 ret = 2;
1713 } else {
1714 void *map_addr;
1715
1716 if (vaddr >= MMAP_AREA_END) {
1717 ret = 2;
1718 } else {
1719 if (prot & PROT_WRITE) {
1720 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1721 #if defined(TARGET_HAS_SMC) || 1
1722 first_tb ||
1723 #endif
1724 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1725 !cpu_physical_memory_is_dirty(pd))) {
1726 /* ROM: we behave as if code was inside */
1727 /* if code is present, we only map as read only and save the
1728 original mapping */
1729 VirtPageDesc *vp;
1730
1731 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1732 vp->phys_addr = pd;
1733 vp->prot = prot;
1734 vp->valid_tag = virt_valid_tag;
1735 prot &= ~PAGE_WRITE;
1736 }
1737 }
1738 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1739 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1740 if (map_addr == MAP_FAILED) {
1741 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1742 paddr, vaddr);
1743 }
1744 }
1745 }
1746 }
1747 #endif
1748 return ret;
1749 }
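/* Note on the TLB entry encoding built above: for RAM the 'address'
   field is just the virtual page address and 'addend' is the host
   address of the backing page minus vaddr, so a guest access resolves to
   host address (guest_vaddr + addend). For I/O pages the low bits of
   'address' carry the io_index (pd & ~TARGET_PAGE_MASK), which is what
   the softmmu slow path (in the load/store helpers, not shown here)
   uses to pick the right io_mem_read/io_mem_write handlers. */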
1750
1751 /* called from signal handler: invalidate the code and unprotect the
1752 page. Return TRUE if the fault was successfully handled. */
1753 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1754 {
1755 #if !defined(CONFIG_SOFTMMU)
1756 VirtPageDesc *vp;
1757
1758 #if defined(DEBUG_TLB)
1759 printf("page_unprotect: addr=0x%08x\n", addr);
1760 #endif
1761 addr &= TARGET_PAGE_MASK;
1762
1763 /* if it is not mapped, no need to worry here */
1764 if (addr >= MMAP_AREA_END)
1765 return 0;
1766 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1767 if (!vp)
1768 return 0;
1769 /* NOTE: in this case, validate_tag is _not_ tested as it
1770 validates only the code TLB */
1771 if (vp->valid_tag != virt_valid_tag)
1772 return 0;
1773 if (!(vp->prot & PAGE_WRITE))
1774 return 0;
1775 #if defined(DEBUG_TLB)
1776 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1777 addr, vp->phys_addr, vp->prot);
1778 #endif
1779 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1780 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1781 (unsigned long)addr, vp->prot);
1782 /* set the dirty bit */
1783 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1784 /* flush the code inside */
1785 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1786 return 1;
1787 #else
1788 return 0;
1789 #endif
1790 }
1791
1792 #else
1793
1794 void tlb_flush(CPUState *env, int flush_global)
1795 {
1796 }
1797
1798 void tlb_flush_page(CPUState *env, target_ulong addr)
1799 {
1800 }
1801
1802 int tlb_set_page(CPUState *env, target_ulong vaddr,
1803 target_phys_addr_t paddr, int prot,
1804 int is_user, int is_softmmu)
1805 {
1806 return 0;
1807 }
1808
1809 /* dump memory mappings */
1810 void page_dump(FILE *f)
1811 {
1812 unsigned long start, end;
1813 int i, j, prot, prot1;
1814 PageDesc *p;
1815
1816 fprintf(f, "%-8s %-8s %-8s %s\n",
1817 "start", "end", "size", "prot");
1818 start = -1;
1819 end = -1;
1820 prot = 0;
1821 for(i = 0; i <= L1_SIZE; i++) {
1822 if (i < L1_SIZE)
1823 p = l1_map[i];
1824 else
1825 p = NULL;
1826 for(j = 0;j < L2_SIZE; j++) {
1827 if (!p)
1828 prot1 = 0;
1829 else
1830 prot1 = p[j].flags;
1831 if (prot1 != prot) {
1832 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1833 if (start != -1) {
1834 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1835 start, end, end - start,
1836 prot & PAGE_READ ? 'r' : '-',
1837 prot & PAGE_WRITE ? 'w' : '-',
1838 prot & PAGE_EXEC ? 'x' : '-');
1839 }
1840 if (prot1 != 0)
1841 start = end;
1842 else
1843 start = -1;
1844 prot = prot1;
1845 }
1846 if (!p)
1847 break;
1848 }
1849 }
1850 }
1851
1852 int page_get_flags(unsigned long address)
1853 {
1854 PageDesc *p;
1855
1856 p = page_find(address >> TARGET_PAGE_BITS);
1857 if (!p)
1858 return 0;
1859 return p->flags;
1860 }
1861
1862 /* modify the flags of a page and invalidate the code if
1863 necessary. The flag PAGE_WRITE_ORG is set automatically
1864 depending on PAGE_WRITE */
1865 void page_set_flags(unsigned long start, unsigned long end, int flags)
1866 {
1867 PageDesc *p;
1868 unsigned long addr;
1869
1870 start = start & TARGET_PAGE_MASK;
1871 end = TARGET_PAGE_ALIGN(end);
1872 if (flags & PAGE_WRITE)
1873 flags |= PAGE_WRITE_ORG;
1874 spin_lock(&tb_lock);
1875 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1876 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1877 /* if the write protection is set, then we invalidate the code
1878 inside */
1879 if (!(p->flags & PAGE_WRITE) &&
1880 (flags & PAGE_WRITE) &&
1881 p->first_tb) {
1882 tb_invalidate_phys_page(addr, 0, NULL);
1883 }
1884 p->flags = flags;
1885 }
1886 spin_unlock(&tb_lock);
1887 }
1888
1889 /* called from signal handler: invalidate the code and unprotect the
1890 page. Return TRUE if the fault was successfully handled. */
1891 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1892 {
1893 unsigned int page_index, prot, pindex;
1894 PageDesc *p, *p1;
1895 unsigned long host_start, host_end, addr;
1896
1897 host_start = address & qemu_host_page_mask;
1898 page_index = host_start >> TARGET_PAGE_BITS;
1899 p1 = page_find(page_index);
1900 if (!p1)
1901 return 0;
1902 host_end = host_start + qemu_host_page_size;
1903 p = p1;
1904 prot = 0;
1905 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1906 prot |= p->flags;
1907 p++;
1908 }
1909 /* if the page was really writable, then we change its
1910 protection back to writable */
1911 if (prot & PAGE_WRITE_ORG) {
1912 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1913 if (!(p1[pindex].flags & PAGE_WRITE)) {
1914 mprotect((void *)host_start, qemu_host_page_size,
1915 (prot & PAGE_BITS) | PAGE_WRITE);
1916 p1[pindex].flags |= PAGE_WRITE;
1917 /* and since the content will be modified, we must invalidate
1918 the corresponding translated code. */
1919 tb_invalidate_phys_page(address, pc, puc);
1920 #ifdef DEBUG_TB_CHECK
1921 tb_invalidate_check(address);
1922 #endif
1923 return 1;
1924 }
1925 }
1926 return 0;
1927 }
1928
1929 /* call this function when system calls directly modify a memory area */
1930 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1931 {
1932 unsigned long start, end, addr;
1933
1934 start = (unsigned long)data;
1935 end = start + data_size;
1936 start &= TARGET_PAGE_MASK;
1937 end = TARGET_PAGE_ALIGN(end);
1938 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1939 page_unprotect(addr, 0, NULL);
1940 }
1941 }
1942
1943 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1944 {
1945 }
1946 #endif /* defined(CONFIG_USER_ONLY) */
1947
1948 /* register physical memory. 'size' must be a multiple of the target
1949 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1950 io memory page */
1951 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1952 unsigned long size,
1953 unsigned long phys_offset)
1954 {
1955 target_phys_addr_t addr, end_addr;
1956 PhysPageDesc *p;
1957
1958 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
1959 end_addr = start_addr + size;
1960 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
1961 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
1962 p->phys_offset = phys_offset;
1963 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1964 phys_offset += TARGET_PAGE_SIZE;
1965 }
1966 }
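/* Sketch of how a machine model might use this (addresses, sizes and the
   'my_*' names are made up for illustration): plain RAM is registered
   with its offset into phys_ram_base as phys_offset, while an MMIO
   region uses the value returned by cpu_register_io_memory() below. */
#if 0
    /* first 640k of RAM, backed by phys_ram_base + 0 */
    cpu_register_physical_memory(0x00000000, 0xa0000, 0x00000000);
    /* a device's MMIO window */
    int my_io_index = cpu_register_io_memory(0, my_mmio_read, my_mmio_write, my_opaque);
    cpu_register_physical_memory(0xfe000000, 0x1000, my_io_index);
#endif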
1967
1968 static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
1969 {
1970 return 0;
1971 }
1972
1973 static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1974 {
1975 }
1976
1977 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1978 unassigned_mem_readb,
1979 unassigned_mem_readb,
1980 unassigned_mem_readb,
1981 };
1982
1983 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1984 unassigned_mem_writeb,
1985 unassigned_mem_writeb,
1986 unassigned_mem_writeb,
1987 };
1988
1989 /* self-modifying code support in softmmu mode: writes to a page
1990 containing code are routed to these functions */
1991
1992 static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1993 {
1994 unsigned long phys_addr;
1995
1996 phys_addr = addr - (unsigned long)phys_ram_base;
1997 #if !defined(CONFIG_USER_ONLY)
1998 tb_invalidate_phys_page_fast(phys_addr, 1);
1999 #endif
2000 stb_p((uint8_t *)(long)addr, val);
2001 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
2002 }
2003
2004 static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2005 {
2006 unsigned long phys_addr;
2007
2008 phys_addr = addr - (unsigned long)phys_ram_base;
2009 #if !defined(CONFIG_USER_ONLY)
2010 tb_invalidate_phys_page_fast(phys_addr, 2);
2011 #endif
2012 stw_p((uint8_t *)(long)addr, val);
2013 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
2014 }
2015
2016 static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2017 {
2018 unsigned long phys_addr;
2019
2020 phys_addr = addr - (unsigned long)phys_ram_base;
2021 #if !defined(CONFIG_USER_ONLY)
2022 tb_invalidate_phys_page_fast(phys_addr, 4);
2023 #endif
2024 stl_p((uint8_t *)(long)addr, val);
2025 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 0xff;
2026 }
2027
2028 static CPUReadMemoryFunc *code_mem_read[3] = {
2029 NULL, /* never used */
2030 NULL, /* never used */
2031 NULL, /* never used */
2032 };
2033
2034 static CPUWriteMemoryFunc *code_mem_write[3] = {
2035 code_mem_writeb,
2036 code_mem_writew,
2037 code_mem_writel,
2038 };
2039
2040 static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2041 {
2042 stb_p((uint8_t *)(long)addr, val);
2043 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2044 }
2045
2046 static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2047 {
2048 stw_p((uint8_t *)(long)addr, val);
2049 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2050 }
2051
2052 static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2053 {
2054 stl_p((uint8_t *)(long)addr, val);
2055 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
2056 }
2057
2058 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2059 notdirty_mem_writeb,
2060 notdirty_mem_writew,
2061 notdirty_mem_writel,
2062 };
2063
2064 static void io_mem_init(void)
2065 {
2066 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
2067 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2068 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
2069 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
2070 io_mem_nb = 5;
2071
2072 /* alloc dirty bits array */
2073 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2074 }
2075
2076 /* mem_read and mem_write are arrays of three functions used to access
2077 the zone as bytes (index 0), words (index 1) and dwords (index 2).
2078 All three functions must be supplied. If io_index is non-zero, the
2079 corresponding io zone is modified. If it is zero, a new io zone is
2080 allocated. The return value can be used with
2081 cpu_register_physical_memory(); -1 is returned on error. */
2082 int cpu_register_io_memory(int io_index,
2083 CPUReadMemoryFunc **mem_read,
2084 CPUWriteMemoryFunc **mem_write,
2085 void *opaque)
2086 {
2087 int i;
2088
2089 if (io_index <= 0) {
2090 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2091 return -1;
2092 io_index = io_mem_nb++;
2093 } else {
2094 if (io_index >= IO_MEM_NB_ENTRIES)
2095 return -1;
2096 }
2097
2098 for(i = 0; i < 3; i++) {
2099 io_mem_read[io_index][i] = mem_read[i];
2100 io_mem_write[io_index][i] = mem_write[i];
2101 }
2102 io_mem_opaque[io_index] = opaque;
2103 return io_index << IO_MEM_SHIFT;
2104 }
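/* Illustrative sketch (not part of the original source): registering a
   memory mapped device. Everything prefixed with 'mydev_' and the
   MYDEV_BASE/MYDEV_SIZE constants are invented for illustration; a real
   device model would normally supply width-specific handlers and a useful
   'opaque' state pointer. */
#if 0
#define MYDEV_BASE 0xe0000000
#define MYDEV_SIZE 0x1000

static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
{
    /* 'opaque' is the pointer given to cpu_register_io_memory() */
    return 0;
}

static void mydev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *mydev_read_fn[3] = {
    mydev_read, mydev_read, mydev_read,
};

static CPUWriteMemoryFunc *mydev_write_fn[3] = {
    mydev_write, mydev_write, mydev_write,
};

static void mydev_init(void)
{
    int io;

    /* io_index == 0 asks for a new io zone to be allocated */
    io = cpu_register_io_memory(0, mydev_read_fn, mydev_write_fn, NULL);
    cpu_register_physical_memory(MYDEV_BASE, MYDEV_SIZE, io);
}
#endif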
2105
2106 CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2107 {
2108 return io_mem_write[io_index >> IO_MEM_SHIFT];
2109 }
2110
2111 CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2112 {
2113 return io_mem_read[io_index >> IO_MEM_SHIFT];
2114 }
2115
2116 /* physical memory access (slow version, mainly for debug) */
2117 #if defined(CONFIG_USER_ONLY)
2118 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2119 int len, int is_write)
2120 {
2121 int l, flags;
2122 target_ulong page;
2123
2124 while (len > 0) {
2125 page = addr & TARGET_PAGE_MASK;
2126 l = (page + TARGET_PAGE_SIZE) - addr;
2127 if (l > len)
2128 l = len;
2129 flags = page_get_flags(page);
2130 if (!(flags & PAGE_VALID))
2131 return;
2132 if (is_write) {
2133 if (!(flags & PAGE_WRITE))
2134 return;
2135 memcpy((uint8_t *)addr, buf, l);
2136 } else {
2137 if (!(flags & PAGE_READ))
2138 return;
2139 memcpy(buf, (uint8_t *)addr, l);
2140 }
2141 len -= l;
2142 buf += l;
2143 addr += l;
2144 }
2145 }
2146
2147 /* never used */
2148 uint32_t ldl_phys(target_phys_addr_t addr)
2149 {
2150 return 0;
2151 }
2152
2153 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2154 {
2155 }
2156
2157 void stl_phys(target_phys_addr_t addr, uint32_t val)
2158 {
2159 }
2160
2161 #else
2162 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2163 int len, int is_write)
2164 {
2165 int l, io_index;
2166 uint8_t *ptr;
2167 uint32_t val;
2168 target_phys_addr_t page;
2169 unsigned long pd;
2170 PhysPageDesc *p;
2171
2172 while (len > 0) {
2173 page = addr & TARGET_PAGE_MASK;
2174 l = (page + TARGET_PAGE_SIZE) - addr;
2175 if (l > len)
2176 l = len;
2177 p = phys_page_find(page >> TARGET_PAGE_BITS);
2178 if (!p) {
2179 pd = IO_MEM_UNASSIGNED;
2180 } else {
2181 pd = p->phys_offset;
2182 }
2183
2184 if (is_write) {
2185 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2186 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2187 if (l >= 4 && ((addr & 3) == 0)) {
2188 /* 32 bit write access */
2189 val = ldl_p(buf);
2190 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2191 l = 4;
2192 } else if (l >= 2 && ((addr & 1) == 0)) {
2193 /* 16 bit write access */
2194 val = lduw_p(buf);
2195 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2196 l = 2;
2197 } else {
2198 /* 8 bit access */
2199 val = ldub_p(buf);
2200 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2201 l = 1;
2202 }
2203 } else {
2204 unsigned long addr1;
2205 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2206 /* RAM case */
2207 ptr = phys_ram_base + addr1;
2208 memcpy(ptr, buf, l);
2209 /* invalidate code */
2210 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2211 /* set dirty bit */
2212 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
2213 }
2214 } else {
2215 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2216 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2217 /* I/O case */
2218 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2219 if (l >= 4 && ((addr & 3) == 0)) {
2220 /* 32 bit read access */
2221 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2222 stl_p(buf, val);
2223 l = 4;
2224 } else if (l >= 2 && ((addr & 1) == 0)) {
2225 /* 16 bit read access */
2226 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2227 stw_p(buf, val);
2228 l = 2;
2229 } else {
2230 /* 8 bit access */
2231 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2232 stb_p(buf, val);
2233 l = 1;
2234 }
2235 } else {
2236 /* RAM case */
2237 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2238 (addr & ~TARGET_PAGE_MASK);
2239 memcpy(buf, ptr, l);
2240 }
2241 }
2242 len -= l;
2243 buf += l;
2244 addr += l;
2245 }
2246 }
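/* Illustrative sketch (not part of the original source): a device model
   doing DMA into guest memory simply calls cpu_physical_memory_rw(); the
   function splits the transfer at page boundaries and dispatches each chunk
   to RAM or to the registered io handlers. Names are invented. */
#if 0
static void example_dma_write(target_phys_addr_t dma_addr,
                              const uint8_t *data, int len)
{
    cpu_physical_memory_rw(dma_addr, (uint8_t *)data, len, 1);
}
#endif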
2247
2248 /* warning: addr must be aligned */
2249 uint32_t ldl_phys(target_phys_addr_t addr)
2250 {
2251 int io_index;
2252 uint8_t *ptr;
2253 uint32_t val;
2254 unsigned long pd;
2255 PhysPageDesc *p;
2256
2257 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2258 if (!p) {
2259 pd = IO_MEM_UNASSIGNED;
2260 } else {
2261 pd = p->phys_offset;
2262 }
2263
2264 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2265 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2266 /* I/O case */
2267 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2268 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2269 } else {
2270 /* RAM case */
2271 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2272 (addr & ~TARGET_PAGE_MASK);
2273 val = ldl_p(ptr);
2274 }
2275 return val;
2276 }
2277
2278 /* warning: addr must be aligned. The ram page is not marked as dirty
2279 and the code inside is not invalidated. This is useful when the dirty
2280 bits are used to track modified PTEs */
2281 void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2282 {
2283 int io_index;
2284 uint8_t *ptr;
2285 unsigned long pd;
2286 PhysPageDesc *p;
2287
2288 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2289 if (!p) {
2290 pd = IO_MEM_UNASSIGNED;
2291 } else {
2292 pd = p->phys_offset;
2293 }
2294
2295 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2296 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2297 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2298 } else {
2299 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2300 (addr & ~TARGET_PAGE_MASK);
2301 stl_p(ptr, val);
2302 }
2303 }
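/* Illustrative sketch (not part of the original source): the intended caller
   of stl_phys_notdirty() is MMU emulation updating a page table entry in
   guest RAM. The 0x20 "accessed" bit is an x86-flavoured placeholder and
   'pte_addr' is hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    /* the notdirty variant avoids marking the RAM page dirty and
       invalidating translated code just because a PTE bit changed */
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif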
2304
2305 /* warning: addr must be aligned */
2306 /* XXX: optimize code invalidation test */
2307 void stl_phys(target_phys_addr_t addr, uint32_t val)
2308 {
2309 int io_index;
2310 uint8_t *ptr;
2311 unsigned long pd;
2312 PhysPageDesc *p;
2313
2314 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2315 if (!p) {
2316 pd = IO_MEM_UNASSIGNED;
2317 } else {
2318 pd = p->phys_offset;
2319 }
2320
2321 if ((pd & ~TARGET_PAGE_MASK) != 0) {
2322 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2323 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2324 } else {
2325 unsigned long addr1;
2326 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2327 /* RAM case */
2328 ptr = phys_ram_base + addr1;
2329 stl_p(ptr, val);
2330 /* invalidate code */
2331 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2332 /* set dirty bit */
2333 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 0xff;
2334 }
2335 }
2336
2337 #endif
2338
2339 /* virtual memory access for debug */
2340 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2341 uint8_t *buf, int len, int is_write)
2342 {
2343 int l;
2344 target_ulong page, phys_addr;
2345
2346 while (len > 0) {
2347 page = addr & TARGET_PAGE_MASK;
2348 phys_addr = cpu_get_phys_page_debug(env, page);
2349 /* if no physical page mapped, return an error */
2350 if (phys_addr == -1)
2351 return -1;
2352 l = (page + TARGET_PAGE_SIZE) - addr;
2353 if (l > len)
2354 l = len;
2355 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2356 buf, l, is_write);
2357 len -= l;
2358 buf += l;
2359 addr += l;
2360 }
2361 return 0;
2362 }
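/* Illustrative sketch (not part of the original source): the kind of call
   the gdb stub or a monitor command makes to read guest virtual memory
   through the slow, TLB-independent path. The helper name and its
   parameters are invented. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *value)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;
    *value = ldl_p(buf);
    return 0;
}
#endif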
2363
2364 void dump_exec_info(FILE *f,
2365 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2366 {
2367 int i, target_code_size, max_target_code_size;
2368 int direct_jmp_count, direct_jmp2_count, cross_page;
2369 TranslationBlock *tb;
2370
2371 target_code_size = 0;
2372 max_target_code_size = 0;
2373 cross_page = 0;
2374 direct_jmp_count = 0;
2375 direct_jmp2_count = 0;
2376 for(i = 0; i < nb_tbs; i++) {
2377 tb = &tbs[i];
2378 target_code_size += tb->size;
2379 if (tb->size > max_target_code_size)
2380 max_target_code_size = tb->size;
2381 if (tb->page_addr[1] != -1)
2382 cross_page++;
2383 if (tb->tb_next_offset[0] != 0xffff) {
2384 direct_jmp_count++;
2385 if (tb->tb_next_offset[1] != 0xffff) {
2386 direct_jmp2_count++;
2387 }
2388 }
2389 }
2390 /* XXX: avoid using doubles ? */
2391 cpu_fprintf(f, "TB count %d\n", nb_tbs);
2392 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
2393 nb_tbs ? target_code_size / nb_tbs : 0,
2394 max_target_code_size);
2395 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
2396 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2397 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2398 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2399 cross_page,
2400 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2401 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
2402 direct_jmp_count,
2403 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2404 direct_jmp2_count,
2405 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2406 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
2407 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2408 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
2409 }
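/* Illustrative sketch (not part of the original source): the statistics
   above can be printed to any stdio stream, since plain fprintf matches the
   cpu_fprintf callback signature. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif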
2410
2411 #if !defined(CONFIG_USER_ONLY)
2412
2413 #define MMUSUFFIX _cmmu
2414 #define GETPC() NULL
2415 #define env cpu_single_env
2416 #define SOFTMMU_CODE_ACCESS
2417
2418 #define SHIFT 0
2419 #include "softmmu_template.h"
2420
2421 #define SHIFT 1
2422 #include "softmmu_template.h"
2423
2424 #define SHIFT 2
2425 #include "softmmu_template.h"
2426
2427 #define SHIFT 3
2428 #include "softmmu_template.h"
2429
2430 #undef env
2431
2432 #endif