1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <stdarg.h>
23 #include <string.h>
24 #include <errno.h>
25 #include <unistd.h>
26 #include <inttypes.h>
27 #include <sys/mman.h>
28
29 #include "config.h"
30 #include "cpu.h"
31 #include "exec-all.h"
32
33 //#define DEBUG_TB_INVALIDATE
34 //#define DEBUG_FLUSH
35 //#define DEBUG_TLB
36
37 /* make various TB consistency checks */
38 //#define DEBUG_TB_CHECK
39 //#define DEBUG_TLB_CHECK
40
41 /* threshold to flush the translated code buffer */
42 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
43
44 #define SMC_BITMAP_USE_THRESHOLD 10
45
46 #define MMAP_AREA_START 0x00000000
47 #define MMAP_AREA_END 0xa8000000
48
49 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
50 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
51 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
52 int nb_tbs;
53 /* any access to the tbs or the page table must use this lock */
54 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
55
56 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
57 uint8_t *code_gen_ptr;
58
59 int phys_ram_size;
60 int phys_ram_fd;
61 uint8_t *phys_ram_base;
62 uint8_t *phys_ram_dirty;
63
64 typedef struct PageDesc {
65 /* offset in memory of the page + io_index in the low 12 bits */
66 unsigned long phys_offset;
67 /* list of TBs intersecting this physical page */
68 TranslationBlock *first_tb;
69      /* in order to optimize self modifying code, we count the number
70         of code writes to a given page before switching to a bitmap */
71 unsigned int code_write_count;
72 uint8_t *code_bitmap;
73 #if defined(CONFIG_USER_ONLY)
74 unsigned long flags;
75 #endif
76 } PageDesc;
77
78 typedef struct VirtPageDesc {
79 /* physical address of code page. It is valid only if 'valid_tag'
80 matches 'virt_valid_tag' */
81 target_ulong phys_addr;
82 unsigned int valid_tag;
83 #if !defined(CONFIG_SOFTMMU)
84 /* original page access rights. It is valid only if 'valid_tag'
85 matches 'virt_valid_tag' */
86 unsigned int prot;
87 #endif
88 } VirtPageDesc;
89
90 #define L2_BITS 10
91 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
92
93 #define L1_SIZE (1 << L1_BITS)
94 #define L2_SIZE (1 << L2_BITS)
95
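/* Example (a sketch, assuming TARGET_PAGE_BITS == 12 as on i386): a 32-bit
   address yields a 20-bit page index which is split into a 10-bit L1 index
   and a 10-bit L2 index for the two-level l1_map table:

       page_index = addr >> TARGET_PAGE_BITS;
       l1_index   = page_index >> L2_BITS;        selects the entry in l1_map[]
       l2_index   = page_index & (L2_SIZE - 1);   selects the PageDesc in that entry
*/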
96 static void io_mem_init(void);
97
98 unsigned long real_host_page_size;
99 unsigned long host_page_bits;
100 unsigned long host_page_size;
101 unsigned long host_page_mask;
102
103 static PageDesc *l1_map[L1_SIZE];
104
105 #if !defined(CONFIG_USER_ONLY)
106 static VirtPageDesc *l1_virt_map[L1_SIZE];
107 static unsigned int virt_valid_tag;
108 #endif
109
110 /* io memory support */
111 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
112 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
113 static int io_mem_nb;
114
115 /* log support */
116 char *logfilename = "/tmp/qemu.log";
117 FILE *logfile;
118 int loglevel;
119
120 static void page_init(void)
121 {
122 /* NOTE: we can always suppose that host_page_size >=
123 TARGET_PAGE_SIZE */
124 real_host_page_size = getpagesize();
125 if (host_page_size == 0)
126 host_page_size = real_host_page_size;
127 if (host_page_size < TARGET_PAGE_SIZE)
128 host_page_size = TARGET_PAGE_SIZE;
129 host_page_bits = 0;
130 while ((1 << host_page_bits) < host_page_size)
131 host_page_bits++;
132 host_page_mask = ~(host_page_size - 1);
133 #if !defined(CONFIG_USER_ONLY)
134 virt_valid_tag = 1;
135 #endif
136 }
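/* Example (illustration only): with a 4096 byte host page, host_page_bits
   ends up as 12 and host_page_mask as 0xfffff000, so "addr & host_page_mask"
   rounds an address down to the start of its host page. */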
137
138 static inline PageDesc *page_find_alloc(unsigned int index)
139 {
140 PageDesc **lp, *p;
141
142 lp = &l1_map[index >> L2_BITS];
143 p = *lp;
144 if (!p) {
145 /* allocate if not found */
146 p = malloc(sizeof(PageDesc) * L2_SIZE);
147 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
148 *lp = p;
149 }
150 return p + (index & (L2_SIZE - 1));
151 }
152
153 static inline PageDesc *page_find(unsigned int index)
154 {
155 PageDesc *p;
156
157 p = l1_map[index >> L2_BITS];
158 if (!p)
159 return 0;
160 return p + (index & (L2_SIZE - 1));
161 }
162
163 #if !defined(CONFIG_USER_ONLY)
164 static void tlb_protect_code(CPUState *env, uint32_t addr);
165 static void tlb_unprotect_code(CPUState *env, uint32_t addr);
166 static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr);
167
168 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
169 {
170 VirtPageDesc **lp, *p;
171
172 lp = &l1_virt_map[index >> L2_BITS];
173 p = *lp;
174 if (!p) {
175 /* allocate if not found */
176 p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
177 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
178 *lp = p;
179 }
180 return p + (index & (L2_SIZE - 1));
181 }
182
183 static inline VirtPageDesc *virt_page_find(unsigned int index)
184 {
185 VirtPageDesc *p;
186
187 p = l1_virt_map[index >> L2_BITS];
188 if (!p)
189 return 0;
190 return p + (index & (L2_SIZE - 1));
191 }
192
193 static void virt_page_flush(void)
194 {
195 int i, j;
196 VirtPageDesc *p;
197
198 virt_valid_tag++;
199
200 if (virt_valid_tag == 0) {
201 virt_valid_tag = 1;
202 for(i = 0; i < L1_SIZE; i++) {
203 p = l1_virt_map[i];
204 if (p) {
205 for(j = 0; j < L2_SIZE; j++)
206 p[j].valid_tag = 0;
207 }
208 }
209 }
210 }
211 #else
212 static void virt_page_flush(void)
213 {
214 }
215 #endif
216
217 void cpu_exec_init(void)
218 {
219 if (!code_gen_ptr) {
220 code_gen_ptr = code_gen_buffer;
221 page_init();
222 io_mem_init();
223 }
224 }
225
226 static inline void invalidate_page_bitmap(PageDesc *p)
227 {
228 if (p->code_bitmap) {
229 free(p->code_bitmap);
230 p->code_bitmap = NULL;
231 }
232 p->code_write_count = 0;
233 }
234
235 /* set to NULL all the 'first_tb' fields in all PageDescs */
236 static void page_flush_tb(void)
237 {
238 int i, j;
239 PageDesc *p;
240
241 for(i = 0; i < L1_SIZE; i++) {
242 p = l1_map[i];
243 if (p) {
244 for(j = 0; j < L2_SIZE; j++) {
245 p->first_tb = NULL;
246 invalidate_page_bitmap(p);
247 p++;
248 }
249 }
250 }
251 }
252
253 /* flush all the translation blocks */
254 /* XXX: tb_flush is currently not thread safe */
255 void tb_flush(CPUState *env)
256 {
257 int i;
258 #if defined(DEBUG_FLUSH)
259 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
260 code_gen_ptr - code_gen_buffer,
261 nb_tbs,
262 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
263 #endif
264 nb_tbs = 0;
265 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
266 tb_hash[i] = NULL;
267 virt_page_flush();
268
269 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
270 tb_phys_hash[i] = NULL;
271 page_flush_tb();
272
273 code_gen_ptr = code_gen_buffer;
274 /* XXX: flush processor icache at this point if cache flush is
275 expensive */
276 }
277
278 #ifdef DEBUG_TB_CHECK
279
280 static void tb_invalidate_check(unsigned long address)
281 {
282 TranslationBlock *tb;
283 int i;
284 address &= TARGET_PAGE_MASK;
285 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
286 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
287 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
288 address >= tb->pc + tb->size)) {
289 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
290 address, tb->pc, tb->size);
291 }
292 }
293 }
294 }
295
296 /* verify that all the pages have correct rights for code */
297 static void tb_page_check(void)
298 {
299 TranslationBlock *tb;
300 int i, flags1, flags2;
301
302 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
303 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
304 flags1 = page_get_flags(tb->pc);
305 flags2 = page_get_flags(tb->pc + tb->size - 1);
306 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
307 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
308 tb->pc, tb->size, flags1, flags2);
309 }
310 }
311 }
312 }
313
314 void tb_jmp_check(TranslationBlock *tb)
315 {
316 TranslationBlock *tb1;
317 unsigned int n1;
318
319 /* suppress any remaining jumps to this TB */
320 tb1 = tb->jmp_first;
321 for(;;) {
322 n1 = (long)tb1 & 3;
323 tb1 = (TranslationBlock *)((long)tb1 & ~3);
324 if (n1 == 2)
325 break;
326 tb1 = tb1->jmp_next[n1];
327 }
328 /* check end of list */
329 if (tb1 != tb) {
330 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
331 }
332 }
333
334 #endif
335
336 /* invalidate one TB */
337 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
338 int next_offset)
339 {
340 TranslationBlock *tb1;
341 for(;;) {
342 tb1 = *ptb;
343 if (tb1 == tb) {
344 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
345 break;
346 }
347 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
348 }
349 }
350
351 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
352 {
353 TranslationBlock *tb1;
354 unsigned int n1;
355
356 for(;;) {
357 tb1 = *ptb;
358 n1 = (long)tb1 & 3;
359 tb1 = (TranslationBlock *)((long)tb1 & ~3);
360 if (tb1 == tb) {
361 *ptb = tb1->page_next[n1];
362 break;
363 }
364 ptb = &tb1->page_next[n1];
365 }
366 }
367
368 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
369 {
370 TranslationBlock *tb1, **ptb;
371 unsigned int n1;
372
373 ptb = &tb->jmp_next[n];
374 tb1 = *ptb;
375 if (tb1) {
376 /* find tb(n) in circular list */
377 for(;;) {
378 tb1 = *ptb;
379 n1 = (long)tb1 & 3;
380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
381 if (n1 == n && tb1 == tb)
382 break;
383 if (n1 == 2) {
384 ptb = &tb1->jmp_first;
385 } else {
386 ptb = &tb1->jmp_next[n1];
387 }
388 }
389 /* now we can suppress tb(n) from the list */
390 *ptb = tb->jmp_next[n];
391
392 tb->jmp_next[n] = NULL;
393 }
394 }
395
396 /* reset the jump entry 'n' of a TB so that it is not chained to
397 another TB */
398 static inline void tb_reset_jump(TranslationBlock *tb, int n)
399 {
400 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
401 }
402
403 static inline void tb_invalidate(TranslationBlock *tb)
404 {
405 unsigned int h, n1;
406 TranslationBlock *tb1, *tb2, **ptb;
407
408 tb_invalidated_flag = 1;
409
410 /* remove the TB from the hash list */
411 h = tb_hash_func(tb->pc);
412 ptb = &tb_hash[h];
413 for(;;) {
414 tb1 = *ptb;
415         /* NOTE: the TB is not necessarily linked in the hash. If it is
416            not, it means the TB is not currently used */
417 if (tb1 == NULL)
418 return;
419 if (tb1 == tb) {
420 *ptb = tb1->hash_next;
421 break;
422 }
423 ptb = &tb1->hash_next;
424 }
425
426 /* suppress this TB from the two jump lists */
427 tb_jmp_remove(tb, 0);
428 tb_jmp_remove(tb, 1);
429
430 /* suppress any remaining jumps to this TB */
431 tb1 = tb->jmp_first;
432 for(;;) {
433 n1 = (long)tb1 & 3;
434 if (n1 == 2)
435 break;
436 tb1 = (TranslationBlock *)((long)tb1 & ~3);
437 tb2 = tb1->jmp_next[n1];
438 tb_reset_jump(tb1, n1);
439 tb1->jmp_next[n1] = NULL;
440 tb1 = tb2;
441 }
442 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
443 }
444
445 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
446 {
447 PageDesc *p;
448 unsigned int h;
449 target_ulong phys_pc;
450
451 /* remove the TB from the hash list */
452 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
453 h = tb_phys_hash_func(phys_pc);
454 tb_remove(&tb_phys_hash[h], tb,
455 offsetof(TranslationBlock, phys_hash_next));
456
457 /* remove the TB from the page list */
458 if (tb->page_addr[0] != page_addr) {
459 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
460 tb_page_remove(&p->first_tb, tb);
461 invalidate_page_bitmap(p);
462 }
463 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
464 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468
469 tb_invalidate(tb);
470 }
471
472 static inline void set_bits(uint8_t *tab, int start, int len)
473 {
474 int end, mask, end1;
475
476 end = start + len;
477 tab += start >> 3;
478 mask = 0xff << (start & 7);
479 if ((start & ~7) == (end & ~7)) {
480 if (start < end) {
481 mask &= ~(0xff << (end & 7));
482 *tab |= mask;
483 }
484 } else {
485 *tab++ |= mask;
486 start = (start + 8) & ~7;
487 end1 = end & ~7;
488 while (start < end1) {
489 *tab++ = 0xff;
490 start += 8;
491 }
492 if (start < end) {
493 mask = ~(0xff << (end & 7));
494 *tab |= mask;
495 }
496 }
497 }
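/* Usage sketch (not compiled in; the wrapper below is purely illustrative):
   set_bits() sets the bits [start, start + len[ of a bitmap with one bit per
   byte of a target page. build_page_bitmap() below uses it to record which
   bytes of a page are covered by translated code. */
#if 0
static void set_bits_example(void)
{
    uint8_t bitmap[TARGET_PAGE_SIZE / 8];

    memset(bitmap, 0, sizeof(bitmap));
    /* mark bytes 5..19 of the page as containing translated code */
    set_bits(bitmap, 5, 15);
    /* bitmap[0] now has bits 5..7 set, bitmap[1] is 0xff and
       bitmap[2] has bits 0..3 set */
}
#endif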
498
499 static void build_page_bitmap(PageDesc *p)
500 {
501 int n, tb_start, tb_end;
502 TranslationBlock *tb;
503
504 p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
505 if (!p->code_bitmap)
506 return;
507 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
508
509 tb = p->first_tb;
510 while (tb != NULL) {
511 n = (long)tb & 3;
512 tb = (TranslationBlock *)((long)tb & ~3);
513 /* NOTE: this is subtle as a TB may span two physical pages */
514 if (n == 0) {
515 /* NOTE: tb_end may be after the end of the page, but
516 it is not a problem */
517 tb_start = tb->pc & ~TARGET_PAGE_MASK;
518 tb_end = tb_start + tb->size;
519 if (tb_end > TARGET_PAGE_SIZE)
520 tb_end = TARGET_PAGE_SIZE;
521 } else {
522 tb_start = 0;
523 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
524 }
525 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
526 tb = tb->page_next[n];
527 }
528 }
529
530 /* invalidate all TBs which intersect with the target physical page
531 starting in range [start;end[. NOTE: start and end must refer to
532 the same physical page. 'vaddr' is a virtual address referencing
533    the physical page of code. It is only used as a hint if there is no
534 code left. */
535 static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
536 target_ulong vaddr)
537 {
538 int n;
539 PageDesc *p;
540 TranslationBlock *tb, *tb_next;
541 target_ulong tb_start, tb_end;
542
543 p = page_find(start >> TARGET_PAGE_BITS);
544 if (!p)
545 return;
546 if (!p->code_bitmap &&
547 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
548 /* build code bitmap */
549 build_page_bitmap(p);
550 }
551
552 /* we remove all the TBs in the range [start, end[ */
553 /* XXX: see if in some cases it could be faster to invalidate all the code */
554 tb = p->first_tb;
555 while (tb != NULL) {
556 n = (long)tb & 3;
557 tb = (TranslationBlock *)((long)tb & ~3);
558 tb_next = tb->page_next[n];
559 /* NOTE: this is subtle as a TB may span two physical pages */
560 if (n == 0) {
561 /* NOTE: tb_end may be after the end of the page, but
562 it is not a problem */
563 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
564 tb_end = tb_start + tb->size;
565 } else {
566 tb_start = tb->page_addr[1];
567 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
568 }
569 if (!(tb_end <= start || tb_start >= end)) {
570 tb_phys_invalidate(tb, -1);
571 }
572 tb = tb_next;
573 }
574 #if !defined(CONFIG_USER_ONLY)
575 /* if no code remaining, no need to continue to use slow writes */
576 if (!p->first_tb) {
577 invalidate_page_bitmap(p);
578 tlb_unprotect_code_phys(cpu_single_env, start, vaddr);
579 }
580 #endif
581 }
582
583 /* len must be <= 8 and start must be a multiple of len */
584 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len, target_ulong vaddr)
585 {
586 PageDesc *p;
587 int offset, b;
588
589 p = page_find(start >> TARGET_PAGE_BITS);
590 if (!p)
591 return;
592 if (p->code_bitmap) {
593 offset = start & ~TARGET_PAGE_MASK;
594 b = p->code_bitmap[offset >> 3] >> (offset & 7);
595 if (b & ((1 << len) - 1))
596 goto do_invalidate;
597 } else {
598 do_invalidate:
599 tb_invalidate_phys_page_range(start, start + len, vaddr);
600 }
601 }
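/* Worked example (illustration only): for a 4 byte store at page offset
   0x100, offset >> 3 selects byte 0x20 of code_bitmap and offset & 7 is 0,
   so only the low 4 bits of that byte are tested; if any of them is set,
   translated code overlaps the written bytes and the slow
   tb_invalidate_phys_page_range() path is taken. */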
602
603 /* invalidate all TBs which intersect with the target virtual page
604 starting in range [start;end[. This function is usually used when
605 the target processor flushes its I-cache. NOTE: start and end must
606 refer to the same physical page */
607 void tb_invalidate_page_range(target_ulong start, target_ulong end)
608 {
609 int n;
610 PageDesc *p;
611 TranslationBlock *tb, *tb_next;
612 target_ulong pc;
613 target_ulong phys_start;
614
615 #if !defined(CONFIG_USER_ONLY)
616 {
617 VirtPageDesc *vp;
618 vp = virt_page_find(start >> TARGET_PAGE_BITS);
619 if (!vp)
620 return;
621 if (vp->valid_tag != virt_valid_tag)
622 return;
623 phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
624 }
625 #else
626 phys_start = start;
627 #endif
628 p = page_find(phys_start >> TARGET_PAGE_BITS);
629 if (!p)
630 return;
631 /* we remove all the TBs in the range [start, end[ */
632 /* XXX: see if in some cases it could be faster to invalidate all the code */
633 tb = p->first_tb;
634 while (tb != NULL) {
635 n = (long)tb & 3;
636 tb = (TranslationBlock *)((long)tb & ~3);
637 tb_next = tb->page_next[n];
638 pc = tb->pc;
639 if (!((pc + tb->size) <= start || pc >= end)) {
640 tb_phys_invalidate(tb, -1);
641 }
642 tb = tb_next;
643 }
644 #if !defined(CONFIG_USER_ONLY)
645 /* if no code remaining, no need to continue to use slow writes */
646 if (!p->first_tb)
647 tlb_unprotect_code(cpu_single_env, start);
648 #endif
649 }
650
651 #if !defined(CONFIG_SOFTMMU)
652 static void tb_invalidate_phys_page(target_ulong addr)
653 {
654 int n;
655 PageDesc *p;
656 TranslationBlock *tb;
657
658 addr &= TARGET_PAGE_MASK;
659 p = page_find(addr >> TARGET_PAGE_BITS);
660 if (!p)
661 return;
662 tb = p->first_tb;
663 while (tb != NULL) {
664 n = (long)tb & 3;
665 tb = (TranslationBlock *)((long)tb & ~3);
666 tb_phys_invalidate(tb, addr);
667 tb = tb->page_next[n];
668 }
669 p->first_tb = NULL;
670 }
671 #endif
672
673 /* add the tb in the target page and protect it if necessary */
674 static inline void tb_alloc_page(TranslationBlock *tb,
675 unsigned int n, unsigned int page_addr)
676 {
677 PageDesc *p;
678 TranslationBlock *last_first_tb;
679
680 tb->page_addr[n] = page_addr;
681 p = page_find(page_addr >> TARGET_PAGE_BITS);
682 tb->page_next[n] = p->first_tb;
683 last_first_tb = p->first_tb;
684 p->first_tb = (TranslationBlock *)((long)tb | n);
685 invalidate_page_bitmap(p);
686
687 #if defined(CONFIG_USER_ONLY)
688 if (p->flags & PAGE_WRITE) {
689 unsigned long host_start, host_end, addr;
690 int prot;
691
692 /* force the host page as non writable (writes will have a
693 page fault + mprotect overhead) */
694 host_start = page_addr & host_page_mask;
695 host_end = host_start + host_page_size;
696 prot = 0;
697 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
698 prot |= page_get_flags(addr);
699 mprotect((void *)host_start, host_page_size,
700 (prot & PAGE_BITS) & ~PAGE_WRITE);
701 #ifdef DEBUG_TB_INVALIDATE
702 printf("protecting code page: 0x%08lx\n",
703 host_start);
704 #endif
705 p->flags &= ~PAGE_WRITE;
706 }
707 #else
708 /* if some code is already present, then the pages are already
709 protected. So we handle the case where only the first TB is
710 allocated in a physical page */
711 if (!last_first_tb) {
712 target_ulong virt_addr;
713
714 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
715 tlb_protect_code(cpu_single_env, virt_addr);
716 }
717 #endif
718 }
719
720 /* Allocate a new translation block. Flush the translation buffer if
721 too many translation blocks or too much generated code. */
722 TranslationBlock *tb_alloc(unsigned long pc)
723 {
724 TranslationBlock *tb;
725
726 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
727 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
728 return NULL;
729 tb = &tbs[nb_tbs++];
730 tb->pc = pc;
731 return tb;
732 }
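/* Caller sketch (an assumption about typical usage, not code from this file;
   the helper name is made up): when tb_alloc() returns NULL the translation
   cache is full, so the caller flushes it and retries. */
#if 0
static TranslationBlock *tb_alloc_or_flush_example(CPUState *env, unsigned long pc)
{
    TranslationBlock *tb;

    tb = tb_alloc(pc);
    if (!tb) {
        /* no room left: drop every translated block and try again */
        tb_flush(env);
        tb = tb_alloc(pc);
    }
    return tb;
}
#endif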
733
734 /* add a new TB and link it to the physical page tables. phys_page2 is
735 (-1) to indicate that only one page contains the TB. */
736 void tb_link_phys(TranslationBlock *tb,
737 target_ulong phys_pc, target_ulong phys_page2)
738 {
739 unsigned int h;
740 TranslationBlock **ptb;
741
742 /* add in the physical hash table */
743 h = tb_phys_hash_func(phys_pc);
744 ptb = &tb_phys_hash[h];
745 tb->phys_hash_next = *ptb;
746 *ptb = tb;
747
748 /* add in the page list */
749 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
750 if (phys_page2 != -1)
751 tb_alloc_page(tb, 1, phys_page2);
752 else
753 tb->page_addr[1] = -1;
754 #ifdef DEBUG_TB_CHECK
755 tb_page_check();
756 #endif
757 }
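/* Caller sketch (assumed usage; get_second_page_addr_example() is a
   hypothetical stand-in for the MMU translation the real caller performs):
   a TB whose last byte lies on another page links to two physical pages,
   otherwise phys_page2 stays -1. */
#if 0
static void tb_link_phys_example(TranslationBlock *tb, target_ulong phys_pc)
{
    target_ulong phys_page2 = -1;

    if (((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) !=
        (tb->pc & TARGET_PAGE_MASK)) {
        phys_page2 = get_second_page_addr_example((tb->pc & TARGET_PAGE_MASK) +
                                                  TARGET_PAGE_SIZE);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif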
758
759 /* link the tb with the other TBs */
760 void tb_link(TranslationBlock *tb)
761 {
762 #if !defined(CONFIG_USER_ONLY)
763 {
764 VirtPageDesc *vp;
765 target_ulong addr;
766
767 /* save the code memory mappings (needed to invalidate the code) */
768 addr = tb->pc & TARGET_PAGE_MASK;
769 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
770 #ifdef DEBUG_TLB_CHECK
771 if (vp->valid_tag == virt_valid_tag &&
772 vp->phys_addr != tb->page_addr[0]) {
773 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
774 addr, tb->page_addr[0], vp->phys_addr);
775 }
776 #endif
777 vp->phys_addr = tb->page_addr[0];
778 vp->valid_tag = virt_valid_tag;
779
780 if (tb->page_addr[1] != -1) {
781 addr += TARGET_PAGE_SIZE;
782 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
783 #ifdef DEBUG_TLB_CHECK
784 if (vp->valid_tag == virt_valid_tag &&
785 vp->phys_addr != tb->page_addr[1]) {
786 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
787 addr, tb->page_addr[1], vp->phys_addr);
788 }
789 #endif
790 vp->phys_addr = tb->page_addr[1];
791 vp->valid_tag = virt_valid_tag;
792 }
793 }
794 #endif
795
796 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
797 tb->jmp_next[0] = NULL;
798 tb->jmp_next[1] = NULL;
799
800 /* init original jump addresses */
801 if (tb->tb_next_offset[0] != 0xffff)
802 tb_reset_jump(tb, 0);
803 if (tb->tb_next_offset[1] != 0xffff)
804 tb_reset_jump(tb, 1);
805 }
806
807 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
808 tb[1].tc_ptr. Return NULL if not found */
809 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
810 {
811 int m_min, m_max, m;
812 unsigned long v;
813 TranslationBlock *tb;
814
815 if (nb_tbs <= 0)
816 return NULL;
817 if (tc_ptr < (unsigned long)code_gen_buffer ||
818 tc_ptr >= (unsigned long)code_gen_ptr)
819 return NULL;
820 /* binary search (cf Knuth) */
821 m_min = 0;
822 m_max = nb_tbs - 1;
823 while (m_min <= m_max) {
824 m = (m_min + m_max) >> 1;
825 tb = &tbs[m];
826 v = (unsigned long)tb->tc_ptr;
827 if (v == tc_ptr)
828 return tb;
829 else if (tc_ptr < v) {
830 m_max = m - 1;
831 } else {
832 m_min = m + 1;
833 }
834 }
835 return &tbs[m_max];
836 }
837
838 static void tb_reset_jump_recursive(TranslationBlock *tb);
839
840 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
841 {
842 TranslationBlock *tb1, *tb_next, **ptb;
843 unsigned int n1;
844
845 tb1 = tb->jmp_next[n];
846 if (tb1 != NULL) {
847 /* find head of list */
848 for(;;) {
849 n1 = (long)tb1 & 3;
850 tb1 = (TranslationBlock *)((long)tb1 & ~3);
851 if (n1 == 2)
852 break;
853 tb1 = tb1->jmp_next[n1];
854 }
855         /* we are now sure that tb jumps to tb1 */
856 tb_next = tb1;
857
858 /* remove tb from the jmp_first list */
859 ptb = &tb_next->jmp_first;
860 for(;;) {
861 tb1 = *ptb;
862 n1 = (long)tb1 & 3;
863 tb1 = (TranslationBlock *)((long)tb1 & ~3);
864 if (n1 == n && tb1 == tb)
865 break;
866 ptb = &tb1->jmp_next[n1];
867 }
868 *ptb = tb->jmp_next[n];
869 tb->jmp_next[n] = NULL;
870
871 /* suppress the jump to next tb in generated code */
872 tb_reset_jump(tb, n);
873
874         /* suppress jumps in the tb we could have jumped to */
875 tb_reset_jump_recursive(tb_next);
876 }
877 }
878
879 static void tb_reset_jump_recursive(TranslationBlock *tb)
880 {
881 tb_reset_jump_recursive2(tb, 0);
882 tb_reset_jump_recursive2(tb, 1);
883 }
884
885 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
886 breakpoint is reached */
887 int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
888 {
889 #if defined(TARGET_I386)
890 int i;
891
892 for(i = 0; i < env->nb_breakpoints; i++) {
893 if (env->breakpoints[i] == pc)
894 return 0;
895 }
896
897 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
898 return -1;
899 env->breakpoints[env->nb_breakpoints++] = pc;
900 tb_invalidate_page_range(pc, pc + 1);
901 return 0;
902 #else
903 return -1;
904 #endif
905 }
906
907 /* remove a breakpoint */
908 int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
909 {
910 #if defined(TARGET_I386)
911 int i;
912 for(i = 0; i < env->nb_breakpoints; i++) {
913 if (env->breakpoints[i] == pc)
914 goto found;
915 }
916 return -1;
917 found:
918 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
919 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
920 env->nb_breakpoints--;
921 tb_invalidate_page_range(pc, pc + 1);
922 return 0;
923 #else
924 return -1;
925 #endif
926 }
927
928 /* enable or disable single step mode. EXCP_DEBUG is returned by the
929 CPU loop after each instruction */
930 void cpu_single_step(CPUState *env, int enabled)
931 {
932 #if defined(TARGET_I386)
933 if (env->singlestep_enabled != enabled) {
934 env->singlestep_enabled = enabled;
935         /* must flush all the translated code to avoid inconsistencies */
936 /* XXX: only flush what is necessary */
937 tb_flush(env);
938 }
939 #endif
940 }
941
942 /* enable or disable low level logging */
943 void cpu_set_log(int log_flags)
944 {
945 loglevel = log_flags;
946 if (loglevel && !logfile) {
947 logfile = fopen(logfilename, "w");
948 if (!logfile) {
949 perror(logfilename);
950 _exit(1);
951 }
952 #if !defined(CONFIG_SOFTMMU)
953 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
954 {
955 static uint8_t logfile_buf[4096];
956 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
957 }
958 #else
959 setvbuf(logfile, NULL, _IOLBF, 0);
960 #endif
961 }
962 }
963
964 void cpu_set_log_filename(const char *filename)
965 {
966 logfilename = strdup(filename);
967 }
968
969 /* mask must never be zero, except for A20 change call */
970 void cpu_interrupt(CPUState *env, int mask)
971 {
972 TranslationBlock *tb;
973 static int interrupt_lock;
974
975 env->interrupt_request |= mask;
976 /* if the cpu is currently executing code, we must unlink it and
977        all the potentially executing TBs */
978 tb = env->current_tb;
979 if (tb && !testandset(&interrupt_lock)) {
980 env->current_tb = NULL;
981 tb_reset_jump_recursive(tb);
982 interrupt_lock = 0;
983 }
984 }
985
986
987 void cpu_abort(CPUState *env, const char *fmt, ...)
988 {
989 va_list ap;
990
991 va_start(ap, fmt);
992 fprintf(stderr, "qemu: fatal: ");
993 vfprintf(stderr, fmt, ap);
994 fprintf(stderr, "\n");
995 #ifdef TARGET_I386
996 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
997 #endif
998 va_end(ap);
999 abort();
1000 }
1001
1002 #if !defined(CONFIG_USER_ONLY)
1003
1004 /* NOTE: if flush_global is true, also flush global entries (not
1005 implemented yet) */
1006 void tlb_flush(CPUState *env, int flush_global)
1007 {
1008 int i;
1009
1010 #if defined(DEBUG_TLB)
1011 printf("tlb_flush:\n");
1012 #endif
1013 /* must reset current TB so that interrupts cannot modify the
1014 links while we are modifying them */
1015 env->current_tb = NULL;
1016
1017 for(i = 0; i < CPU_TLB_SIZE; i++) {
1018 env->tlb_read[0][i].address = -1;
1019 env->tlb_write[0][i].address = -1;
1020 env->tlb_read[1][i].address = -1;
1021 env->tlb_write[1][i].address = -1;
1022 }
1023
1024 virt_page_flush();
1025 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1026 tb_hash[i] = NULL;
1027
1028 #if !defined(CONFIG_SOFTMMU)
1029 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1030 #endif
1031 }
1032
1033 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
1034 {
1035 if (addr == (tlb_entry->address &
1036 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1037 tlb_entry->address = -1;
1038 }
1039
1040 void tlb_flush_page(CPUState *env, uint32_t addr)
1041 {
1042 int i, n;
1043 VirtPageDesc *vp;
1044 PageDesc *p;
1045 TranslationBlock *tb;
1046
1047 #if defined(DEBUG_TLB)
1048 printf("tlb_flush_page: 0x%08x\n", addr);
1049 #endif
1050 /* must reset current TB so that interrupts cannot modify the
1051 links while we are modifying them */
1052 env->current_tb = NULL;
1053
1054 addr &= TARGET_PAGE_MASK;
1055 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1056 tlb_flush_entry(&env->tlb_read[0][i], addr);
1057 tlb_flush_entry(&env->tlb_write[0][i], addr);
1058 tlb_flush_entry(&env->tlb_read[1][i], addr);
1059 tlb_flush_entry(&env->tlb_write[1][i], addr);
1060
1061 /* remove from the virtual pc hash table all the TB at this
1062 virtual address */
1063
1064 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1065 if (vp && vp->valid_tag == virt_valid_tag) {
1066 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1067 if (p) {
1068 /* we remove all the links to the TBs in this virtual page */
1069 tb = p->first_tb;
1070 while (tb != NULL) {
1071 n = (long)tb & 3;
1072 tb = (TranslationBlock *)((long)tb & ~3);
1073 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1074 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1075 tb_invalidate(tb);
1076 }
1077 tb = tb->page_next[n];
1078 }
1079 }
1080 vp->valid_tag = 0;
1081 }
1082
1083 #if !defined(CONFIG_SOFTMMU)
1084 if (addr < MMAP_AREA_END)
1085 munmap((void *)addr, TARGET_PAGE_SIZE);
1086 #endif
1087 }
1088
1089 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1090 {
1091 if (addr == (tlb_entry->address &
1092 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1093 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1094 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1095 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1096 }
1097 }
1098
1099 /* update the TLBs so that writes to code in the virtual page 'addr'
1100 can be detected */
1101 static void tlb_protect_code(CPUState *env, uint32_t addr)
1102 {
1103 int i;
1104
1105 addr &= TARGET_PAGE_MASK;
1106 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1107 tlb_protect_code1(&env->tlb_write[0][i], addr);
1108 tlb_protect_code1(&env->tlb_write[1][i], addr);
1109 #if !defined(CONFIG_SOFTMMU)
1110 /* NOTE: as we generated the code for this page, it is already at
1111 least readable */
1112 if (addr < MMAP_AREA_END)
1113 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1114 #endif
1115 }
1116
1117 static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1118 {
1119 if (addr == (tlb_entry->address &
1120 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1121 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
1122 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1123 }
1124 }
1125
1126 /* update the TLB so that writes in virtual page 'addr' are no longer
1127    tested for self modifying code */
1128 static void tlb_unprotect_code(CPUState *env, uint32_t addr)
1129 {
1130 int i;
1131
1132 addr &= TARGET_PAGE_MASK;
1133 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1134 tlb_unprotect_code1(&env->tlb_write[0][i], addr);
1135 tlb_unprotect_code1(&env->tlb_write[1][i], addr);
1136 }
1137
1138 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1139 uint32_t phys_addr)
1140 {
1141 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1142 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1143 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1144 }
1145 }
1146
1147 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1148    tested for self modifying code */
1149 static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr, target_ulong vaddr)
1150 {
1151 int i;
1152
1153 phys_addr &= TARGET_PAGE_MASK;
1154 phys_addr += (long)phys_ram_base;
1155 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1156 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1157 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1158 }
1159
1160 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1161 unsigned long start, unsigned long length)
1162 {
1163 unsigned long addr;
1164 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1165 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1166 if ((addr - start) < length) {
1167 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1168 }
1169 }
1170 }
1171
1172 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1173 {
1174 CPUState *env;
1175 target_ulong length;
1176 int i;
1177
1178 start &= TARGET_PAGE_MASK;
1179 end = TARGET_PAGE_ALIGN(end);
1180
1181 length = end - start;
1182 if (length == 0)
1183 return;
1184 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1185
1186 env = cpu_single_env;
1187 /* we modify the TLB cache so that the dirty bit will be set again
1188 when accessing the range */
1189 start += (unsigned long)phys_ram_base;
1190 for(i = 0; i < CPU_TLB_SIZE; i++)
1191 tlb_reset_dirty_range(&env->tlb_write[0][i], start, length);
1192 for(i = 0; i < CPU_TLB_SIZE; i++)
1193 tlb_reset_dirty_range(&env->tlb_write[1][i], start, length);
1194 }
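/* Consumer sketch (an assumption about how the dirty bits are meant to be
   used, e.g. by a display refresh loop; redraw_page_example() is made up):
   phys_ram_dirty holds one byte per target page, set on every write to RAM,
   and cpu_physical_memory_reset_dirty() clears a range so that further
   writes will mark it dirty again. */
#if 0
static void refresh_region_example(target_ulong start, target_ulong end)
{
    target_ulong addr;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        if (phys_ram_dirty[addr >> TARGET_PAGE_BITS])
            redraw_page_example(addr);
    }
    /* re-arm dirty tracking for the whole range */
    cpu_physical_memory_reset_dirty(start, end);
}
#endif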
1195
1196 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1197 unsigned long start)
1198 {
1199 unsigned long addr;
1200 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1201 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1202 if (addr == start) {
1203 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1204 }
1205 }
1206 }
1207
1208 /* update the TLB corresponding to virtual page vaddr and phys addr
1209 addr so that it is no longer dirty */
1210 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1211 {
1212 CPUState *env = cpu_single_env;
1213 int i;
1214
1215 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1216
1217 addr &= TARGET_PAGE_MASK;
1218 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1219 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1220 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1221 }
1222
1223 /* add a new TLB entry. At most one entry for a given virtual
1224 address is permitted. */
1225 int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1226 int is_user, int is_softmmu)
1227 {
1228 PageDesc *p;
1229 target_ulong pd;
1230 TranslationBlock *first_tb;
1231 unsigned int index;
1232 target_ulong address, addend;
1233 int ret;
1234
1235 p = page_find(paddr >> TARGET_PAGE_BITS);
1236 if (!p) {
1237 pd = IO_MEM_UNASSIGNED;
1238 first_tb = NULL;
1239 } else {
1240 pd = p->phys_offset;
1241 first_tb = p->first_tb;
1242 }
1243 #if defined(DEBUG_TLB)
1244 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1245 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1246 #endif
1247
1248 ret = 0;
1249 #if !defined(CONFIG_SOFTMMU)
1250 if (is_softmmu)
1251 #endif
1252 {
1253 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1254 /* IO memory case */
1255 address = vaddr | pd;
1256 addend = paddr;
1257 } else {
1258 /* standard memory */
1259 address = vaddr;
1260 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1261 }
1262
1263 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1264 addend -= vaddr;
1265 if (prot & PROT_READ) {
1266 env->tlb_read[is_user][index].address = address;
1267 env->tlb_read[is_user][index].addend = addend;
1268 } else {
1269 env->tlb_read[is_user][index].address = -1;
1270 env->tlb_read[is_user][index].addend = -1;
1271 }
1272 if (prot & PROT_WRITE) {
1273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1274 /* ROM: access is ignored (same as unassigned) */
1275 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1276 env->tlb_write[is_user][index].addend = addend;
1277 } else if (first_tb) {
1278 /* if code is present, we use a specific memory
1279 handler. It works only for physical memory access */
1280 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1281 env->tlb_write[is_user][index].addend = addend;
1282 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1283 !cpu_physical_memory_is_dirty(pd)) {
1284 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1285 env->tlb_write[is_user][index].addend = addend;
1286 } else {
1287 env->tlb_write[is_user][index].address = address;
1288 env->tlb_write[is_user][index].addend = addend;
1289 }
1290 } else {
1291 env->tlb_write[is_user][index].address = -1;
1292 env->tlb_write[is_user][index].addend = -1;
1293 }
1294 }
1295 #if !defined(CONFIG_SOFTMMU)
1296 else {
1297 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1298 /* IO access: no mapping is done as it will be handled by the
1299 soft MMU */
1300 if (!(env->hflags & HF_SOFTMMU_MASK))
1301 ret = 2;
1302 } else {
1303 void *map_addr;
1304 if (prot & PROT_WRITE) {
1305 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
1306 /* ROM: we do as if code was inside */
1307 /* if code is present, we only map as read only and save the
1308 original mapping */
1309 VirtPageDesc *vp;
1310
1311 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1312 vp->phys_addr = pd;
1313 vp->prot = prot;
1314 vp->valid_tag = virt_valid_tag;
1315 prot &= ~PAGE_WRITE;
1316 }
1317 }
1318 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1319 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1320 if (map_addr == MAP_FAILED) {
1321                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1322 paddr, vaddr);
1323 }
1324 }
1325 }
1326 #endif
1327 return ret;
1328 }
1329
1330 /* called from signal handler: invalidate the code and unprotect the
1331    page. Return TRUE if the fault was successfully handled. */
1332 int page_unprotect(unsigned long addr)
1333 {
1334 #if !defined(CONFIG_SOFTMMU)
1335 VirtPageDesc *vp;
1336
1337 #if defined(DEBUG_TLB)
1338 printf("page_unprotect: addr=0x%08x\n", addr);
1339 #endif
1340 addr &= TARGET_PAGE_MASK;
1341 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1342 if (!vp)
1343 return 0;
1344 /* NOTE: in this case, validate_tag is _not_ tested as it
1345 validates only the code TLB */
1346 if (vp->valid_tag != virt_valid_tag)
1347 return 0;
1348 if (!(vp->prot & PAGE_WRITE))
1349 return 0;
1350 #if defined(DEBUG_TLB)
1351 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1352 addr, vp->phys_addr, vp->prot);
1353 #endif
1354 tb_invalidate_phys_page(vp->phys_addr);
1355 mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
1356 return 1;
1357 #else
1358 return 0;
1359 #endif
1360 }
1361
1362 #else
1363
1364 void tlb_flush(CPUState *env, int flush_global)
1365 {
1366 }
1367
1368 void tlb_flush_page(CPUState *env, uint32_t addr)
1369 {
1370 }
1371
1372 void tlb_flush_page_write(CPUState *env, uint32_t addr)
1373 {
1374 }
1375
1376 int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1377 int is_user, int is_softmmu)
1378 {
1379 return 0;
1380 }
1381
1382 /* dump memory mappings */
1383 void page_dump(FILE *f)
1384 {
1385 unsigned long start, end;
1386 int i, j, prot, prot1;
1387 PageDesc *p;
1388
1389 fprintf(f, "%-8s %-8s %-8s %s\n",
1390 "start", "end", "size", "prot");
1391 start = -1;
1392 end = -1;
1393 prot = 0;
1394 for(i = 0; i <= L1_SIZE; i++) {
1395 if (i < L1_SIZE)
1396 p = l1_map[i];
1397 else
1398 p = NULL;
1399 for(j = 0;j < L2_SIZE; j++) {
1400 if (!p)
1401 prot1 = 0;
1402 else
1403 prot1 = p[j].flags;
1404 if (prot1 != prot) {
1405 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1406 if (start != -1) {
1407 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1408 start, end, end - start,
1409 prot & PAGE_READ ? 'r' : '-',
1410 prot & PAGE_WRITE ? 'w' : '-',
1411 prot & PAGE_EXEC ? 'x' : '-');
1412 }
1413 if (prot1 != 0)
1414 start = end;
1415 else
1416 start = -1;
1417 prot = prot1;
1418 }
1419 if (!p)
1420 break;
1421 }
1422 }
1423 }
1424
1425 int page_get_flags(unsigned long address)
1426 {
1427 PageDesc *p;
1428
1429 p = page_find(address >> TARGET_PAGE_BITS);
1430 if (!p)
1431 return 0;
1432 return p->flags;
1433 }
1434
1435 /* modify the flags of a page and invalidate the code if
1436    necessary. The flag PAGE_WRITE_ORG is positioned automatically
1437 depending on PAGE_WRITE */
1438 void page_set_flags(unsigned long start, unsigned long end, int flags)
1439 {
1440 PageDesc *p;
1441 unsigned long addr;
1442
1443 start = start & TARGET_PAGE_MASK;
1444 end = TARGET_PAGE_ALIGN(end);
1445 if (flags & PAGE_WRITE)
1446 flags |= PAGE_WRITE_ORG;
1447 spin_lock(&tb_lock);
1448 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1449 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1450         /* if a write protected page is being made writable again, we
1451            invalidate the code inside */
1452 if (!(p->flags & PAGE_WRITE) &&
1453 (flags & PAGE_WRITE) &&
1454 p->first_tb) {
1455 tb_invalidate_phys_page(addr);
1456 }
1457 p->flags = flags;
1458 }
1459 spin_unlock(&tb_lock);
1460 }
1461
1462 /* called from signal handler: invalidate the code and unprotect the
1463    page. Return TRUE if the fault was successfully handled. */
1464 int page_unprotect(unsigned long address)
1465 {
1466 unsigned int page_index, prot, pindex;
1467 PageDesc *p, *p1;
1468 unsigned long host_start, host_end, addr;
1469
1470 host_start = address & host_page_mask;
1471 page_index = host_start >> TARGET_PAGE_BITS;
1472 p1 = page_find(page_index);
1473 if (!p1)
1474 return 0;
1475 host_end = host_start + host_page_size;
1476 p = p1;
1477 prot = 0;
1478 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1479 prot |= p->flags;
1480 p++;
1481 }
1482 /* if the page was really writable, then we change its
1483 protection back to writable */
1484 if (prot & PAGE_WRITE_ORG) {
1485 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1486 if (!(p1[pindex].flags & PAGE_WRITE)) {
1487 mprotect((void *)host_start, host_page_size,
1488 (prot & PAGE_BITS) | PAGE_WRITE);
1489 p1[pindex].flags |= PAGE_WRITE;
1490 /* and since the content will be modified, we must invalidate
1491 the corresponding translated code. */
1492 tb_invalidate_phys_page(address);
1493 #ifdef DEBUG_TB_CHECK
1494 tb_invalidate_check(address);
1495 #endif
1496 return 1;
1497 }
1498 }
1499 return 0;
1500 }
1501
1502 /* call this function when system calls directly modify a memory area */
1503 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1504 {
1505 unsigned long start, end, addr;
1506
1507 start = (unsigned long)data;
1508 end = start + data_size;
1509 start &= TARGET_PAGE_MASK;
1510 end = TARGET_PAGE_ALIGN(end);
1511 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1512 page_unprotect(addr);
1513 }
1514 }
1515
1516 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1517 {
1518 }
1519
1520 #endif /* defined(CONFIG_USER_ONLY) */
1521
1522 /* register physical memory. 'size' must be a multiple of the target
1523 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1524 io memory page */
1525 void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
1526 long phys_offset)
1527 {
1528 unsigned long addr, end_addr;
1529 PageDesc *p;
1530
1531 end_addr = start_addr + size;
1532 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
1533 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1534 p->phys_offset = phys_offset;
1535 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1536 phys_offset += TARGET_PAGE_SIZE;
1537 }
1538 }
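/* Usage sketch (assumed machine setup code, not part of this file): plain
   RAM is registered with a phys_offset equal to its offset inside
   phys_ram_base; an I/O area would instead pass the value returned by
   cpu_register_io_memory() (see the sketch after that function). */
#if 0
static void memory_map_example(unsigned long ram_size)
{
    /* map all of guest RAM 1:1 onto the start of phys_ram_base */
    cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
}
#endif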
1539
1540 static uint32_t unassigned_mem_readb(uint32_t addr)
1541 {
1542 return 0;
1543 }
1544
1545 static void unassigned_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
1546 {
1547 }
1548
1549 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1550 unassigned_mem_readb,
1551 unassigned_mem_readb,
1552 unassigned_mem_readb,
1553 };
1554
1555 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1556 unassigned_mem_writeb,
1557 unassigned_mem_writeb,
1558 unassigned_mem_writeb,
1559 };
1560
1561 /* self modifying code support in soft mmu mode: writing to a page
1562 containing code comes to these functions */
1563
1564 static void code_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
1565 {
1566 unsigned long phys_addr;
1567
1568 phys_addr = addr - (long)phys_ram_base;
1569 #if !defined(CONFIG_USER_ONLY)
1570 tb_invalidate_phys_page_fast(phys_addr, 1, vaddr);
1571 #endif
1572 stb_raw((uint8_t *)addr, val);
1573 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1574 }
1575
1576 static void code_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
1577 {
1578 unsigned long phys_addr;
1579
1580 phys_addr = addr - (long)phys_ram_base;
1581 #if !defined(CONFIG_USER_ONLY)
1582 tb_invalidate_phys_page_fast(phys_addr, 2, vaddr);
1583 #endif
1584 stw_raw((uint8_t *)addr, val);
1585 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1586 }
1587
1588 static void code_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
1589 {
1590 unsigned long phys_addr;
1591
1592 phys_addr = addr - (long)phys_ram_base;
1593 #if !defined(CONFIG_USER_ONLY)
1594 tb_invalidate_phys_page_fast(phys_addr, 4, vaddr);
1595 #endif
1596 stl_raw((uint8_t *)addr, val);
1597 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1598 }
1599
1600 static CPUReadMemoryFunc *code_mem_read[3] = {
1601 NULL, /* never used */
1602 NULL, /* never used */
1603 NULL, /* never used */
1604 };
1605
1606 static CPUWriteMemoryFunc *code_mem_write[3] = {
1607 code_mem_writeb,
1608 code_mem_writew,
1609 code_mem_writel,
1610 };
1611
1612 static void notdirty_mem_writeb(uint32_t addr, uint32_t val, uint32_t vaddr)
1613 {
1614 stb_raw((uint8_t *)addr, val);
1615 tlb_set_dirty(addr, vaddr);
1616 }
1617
1618 static void notdirty_mem_writew(uint32_t addr, uint32_t val, uint32_t vaddr)
1619 {
1620 stw_raw((uint8_t *)addr, val);
1621 tlb_set_dirty(addr, vaddr);
1622 }
1623
1624 static void notdirty_mem_writel(uint32_t addr, uint32_t val, uint32_t vaddr)
1625 {
1626 stl_raw((uint8_t *)addr, val);
1627 tlb_set_dirty(addr, vaddr);
1628 }
1629
1630 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1631 notdirty_mem_writeb,
1632 notdirty_mem_writew,
1633 notdirty_mem_writel,
1634 };
1635
1636 static void io_mem_init(void)
1637 {
1638 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1639 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1640 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1641 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
1642 io_mem_nb = 5;
1643
1644 /* alloc dirty bits array */
1645 phys_ram_dirty = malloc(phys_ram_size >> TARGET_PAGE_BITS);
1646 }
1647
1648 /* mem_read and mem_write are arrays of functions containing the
1649 function to access byte (index 0), word (index 1) and dword (index
1650    2). All functions must be supplied. If io_index is non-zero, the
1651 corresponding io zone is modified. If it is zero, a new io zone is
1652 allocated. The return value can be used with
1653    cpu_register_physical_memory(). (-1) is returned on error. */
1654 int cpu_register_io_memory(int io_index,
1655 CPUReadMemoryFunc **mem_read,
1656 CPUWriteMemoryFunc **mem_write)
1657 {
1658 int i;
1659
1660 if (io_index <= 0) {
1661         if (io_mem_nb >= IO_MEM_NB_ENTRIES)
1662 return -1;
1663 io_index = io_mem_nb++;
1664 } else {
1665 if (io_index >= IO_MEM_NB_ENTRIES)
1666 return -1;
1667 }
1668
1669 for(i = 0;i < 3; i++) {
1670 io_mem_read[io_index][i] = mem_read[i];
1671 io_mem_write[io_index][i] = mem_write[i];
1672 }
1673 return io_index << IO_MEM_SHIFT;
1674 }
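/* Device sketch (a hypothetical device, illustrative names only): each array
   holds the byte, word and dword access functions; the value returned by
   cpu_register_io_memory() is then used as the phys_offset argument of
   cpu_register_physical_memory(). */
#if 0
static uint32_t mydev_readb(uint32_t addr) { return 0xff; }
static uint32_t mydev_readw(uint32_t addr) { return 0xffff; }
static uint32_t mydev_readl(uint32_t addr) { return 0xffffffff; }
static void mydev_writeb(uint32_t addr, uint32_t val, uint32_t vaddr) { }
static void mydev_writew(uint32_t addr, uint32_t val, uint32_t vaddr) { }
static void mydev_writel(uint32_t addr, uint32_t val, uint32_t vaddr) { }

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_init_example(void)
{
    int iomemtype;

    /* passing 0 asks for a fresh io zone; the guest address is arbitrary */
    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
    cpu_register_physical_memory(0xfe000000, 0x1000, iomemtype);
}
#endif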
1675
1676 /* physical memory access (slow version, mainly for debug) */
1677 #if defined(CONFIG_USER_ONLY)
1678 void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
1679 int len, int is_write)
1680 {
1681 int l, flags;
1682 target_ulong page;
1683
1684 while (len > 0) {
1685 page = addr & TARGET_PAGE_MASK;
1686 l = (page + TARGET_PAGE_SIZE) - addr;
1687 if (l > len)
1688 l = len;
1689 flags = page_get_flags(page);
1690 if (!(flags & PAGE_VALID))
1691 return;
1692 if (is_write) {
1693 if (!(flags & PAGE_WRITE))
1694 return;
1695             memcpy((uint8_t *)addr, buf, l);
1696 } else {
1697 if (!(flags & PAGE_READ))
1698 return;
1699             memcpy(buf, (uint8_t *)addr, l);
1700 }
1701 len -= l;
1702 buf += l;
1703 addr += l;
1704 }
1705 }
1706 #else
1707 void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
1708 int len, int is_write)
1709 {
1710 int l, io_index;
1711 uint8_t *ptr;
1712 uint32_t val;
1713 target_ulong page, pd;
1714 PageDesc *p;
1715
1716 while (len > 0) {
1717 page = addr & TARGET_PAGE_MASK;
1718 l = (page + TARGET_PAGE_SIZE) - addr;
1719 if (l > len)
1720 l = len;
1721 p = page_find(page >> TARGET_PAGE_BITS);
1722 if (!p) {
1723 pd = IO_MEM_UNASSIGNED;
1724 } else {
1725 pd = p->phys_offset;
1726 }
1727
1728 if (is_write) {
1729 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1730 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1731 if (l >= 4 && ((addr & 3) == 0)) {
1732                     /* 32 bit write access */
1733 val = ldl_raw(buf);
1734 io_mem_write[io_index][2](addr, val, 0);
1735 l = 4;
1736 } else if (l >= 2 && ((addr & 1) == 0)) {
1737                     /* 16 bit write access */
1738 val = lduw_raw(buf);
1739 io_mem_write[io_index][1](addr, val, 0);
1740 l = 2;
1741 } else {
1742 /* 8 bit access */
1743 val = ldub_raw(buf);
1744 io_mem_write[io_index][0](addr, val, 0);
1745 l = 1;
1746 }
1747 } else {
1748 /* RAM case */
1749 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1750 (addr & ~TARGET_PAGE_MASK);
1751 memcpy(ptr, buf, l);
1752 }
1753 } else {
1754 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1755 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
1756 /* I/O case */
1757 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1758 if (l >= 4 && ((addr & 3) == 0)) {
1759 /* 32 bit read access */
1760 val = io_mem_read[io_index][2](addr);
1761 stl_raw(buf, val);
1762 l = 4;
1763 } else if (l >= 2 && ((addr & 1) == 0)) {
1764 /* 16 bit read access */
1765 val = io_mem_read[io_index][1](addr);
1766 stw_raw(buf, val);
1767 l = 2;
1768 } else {
1769 /* 8 bit access */
1770 val = io_mem_read[io_index][0](addr);
1771 stb_raw(buf, val);
1772 l = 1;
1773 }
1774 } else {
1775 /* RAM case */
1776 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1777 (addr & ~TARGET_PAGE_MASK);
1778 memcpy(buf, ptr, l);
1779 }
1780 }
1781 len -= l;
1782 buf += l;
1783 addr += l;
1784 }
1785 }
1786 #endif
1787
1788 /* virtual memory access for debug */
1789 int cpu_memory_rw_debug(CPUState *env,
1790 uint8_t *buf, target_ulong addr, int len, int is_write)
1791 {
1792 int l;
1793 target_ulong page, phys_addr;
1794
1795 while (len > 0) {
1796 page = addr & TARGET_PAGE_MASK;
1797 phys_addr = cpu_get_phys_page_debug(env, page);
1798 /* if no physical page mapped, return an error */
1799 if (phys_addr == -1)
1800 return -1;
1801 l = (page + TARGET_PAGE_SIZE) - addr;
1802 if (l > len)
1803 l = len;
1804 cpu_physical_memory_rw(env, buf,
1805 phys_addr + (addr & ~TARGET_PAGE_MASK), l,
1806 is_write);
1807 len -= l;
1808 buf += l;
1809 addr += l;
1810 }
1811 return 0;
1812 }
1813
1814 #if !defined(CONFIG_USER_ONLY)
1815
1816 #define MMUSUFFIX _cmmu
1817 #define GETPC() NULL
1818 #define env cpu_single_env
1819
1820 #define SHIFT 0
1821 #include "softmmu_template.h"
1822
1823 #define SHIFT 1
1824 #include "softmmu_template.h"
1825
1826 #define SHIFT 2
1827 #include "softmmu_template.h"
1828
1829 #define SHIFT 3
1830 #include "softmmu_template.h"
1831
1832 #undef env
1833
1834 #endif