/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/mman.h>

#include "config.h"
#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define CODE_GEN_MAX_BLOCKS (CODE_GEN_BUFFER_SIZE / 64)

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
    unsigned long flags;
    TranslationBlock *first_tb;
} PageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

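/* Editor's note: the page table is a two-level radix tree over the
   32-bit target address space. With TARGET_PAGE_BITS == 12 (the i386
   case), L1_BITS is 10, so an address decomposes as:

       bits 31..22   index into l1_map[]            (L1_SIZE entries)
       bits 21..12   index into that PageDesc array (L2_SIZE entries)
       bits 11..0    byte offset inside the target page

   Second-level tables are only allocated on first use (see
   page_find_alloc() below), so sparse address spaces stay cheap. */
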
static void tb_invalidate_page(unsigned long address);
static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

static PageDesc *l1_map[L1_SIZE];

/* io memory support */
static unsigned long *l1_physmap[L1_SIZE];
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

static void page_init(void)
{
    /* NOTE: we can always assume that host_page_size >=
       TARGET_PAGE_SIZE */
    real_host_page_size = getpagesize();
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
}
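
/* Worked example (editor's note): on a host with 4 KB pages,
   getpagesize() returns 4096, so the loop above leaves
   host_page_bits == 12 and host_page_mask == 0xfffff000; with an
   8 KB host page size the results would be 13 and 0xffffe000
   (values shown for a 32-bit unsigned long). */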

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}
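
/* Editor's sketch (hypothetical, not part of the original file): the
   two lookup variants above differ only on a miss -- page_find() is
   for read-only queries and returns NULL, while page_find_alloc()
   creates the missing second-level table: */
#if 0
static void example_query(unsigned long address)
{
    PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
    if (p)
        printf("flags=0x%lx\n", p->flags);
    else
        printf("page never touched\n");
}
#endif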

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write-protected because it contains
           translated code, enabling write access again requires
           invalidating that code */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_page(addr);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
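
/* Hypothetical usage sketch (editor's addition): a target mmap()
   emulation could publish a freshly mapped, writable region like
   this; page_set_flags() then takes care of invalidating any stale
   translated code in the range. The exact flag combination is an
   assumption: */
#if 0
static void example_publish_mapping(unsigned long start, unsigned long len)
{
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE | PAGE_EXEC);
}
#endif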

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++)
                p[j].first_tb = NULL;
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(void)
{
    int i;
#ifdef DEBUG_FLUSH
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           (int)(code_gen_ptr - code_gen_buffer),
           nb_tbs,
           nb_tbs > 0 ? (int)((code_gen_ptr - code_gen_buffer) / nb_tbs) : 0);
#endif
    nb_tbs = 0;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    page_flush_tb();
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush the processor icache at this point if cache flushes
       are expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* walk the list of TBs jumping to this one */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in the circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can remove tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
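
/* Editor's note on the list encoding used above and below: every TB
   that chains to some TB 'T' is kept in a circular list rooted at
   T->jmp_first. A list entry is a TranslationBlock pointer with the
   jump slot number (0 or 1) stored in its two low bits, and the value
   ((long)T | 2) marks the end of the list. Hence the recurring idiom:

       n1  = (long)tb1 & 3;                          slot number or end marker
       tb1 = (TranslationBlock *)((long)tb1 & ~3);   real pointer

   This is safe because TranslationBlock structures are at least
   4-byte aligned, leaving the two low pointer bits free. */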

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb, int parity)
{
    PageDesc *p;
    unsigned int page_index1, page_index2;
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    tb_remove(&tb_hash[h], tb,
              offsetof(TranslationBlock, hash_next));
    /* remove the TB from the page lists */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    if ((page_index1 & 1) == parity) {
        p = page_find(page_index1);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index1 & 1]));
    }
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if ((page_index2 & 1) == parity) {
        p = page_find(page_index2);
        tb_remove(&p->first_tb, tb,
                  offsetof(TranslationBlock, page_next[page_index2 & 1]));
    }

    /* remove this TB from the two jump lists it may be on */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* reset any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

/* invalidate all TBs which intersect with the target page starting at
   'address' */
static void tb_invalidate_page(unsigned long address)
{
    TranslationBlock *tb_next, *tb;
    unsigned int page_index;
    int parity1, parity2;
    PageDesc *p;
#ifdef DEBUG_TB_INVALIDATE
    printf("tb_invalidate_page: %lx\n", address);
#endif

    page_index = address >> TARGET_PAGE_BITS;
    p = page_find(page_index);
    if (!p)
        return;
    tb = p->first_tb;
    parity1 = page_index & 1;
    parity2 = parity1 ^ 1;
    while (tb != NULL) {
        tb_next = tb->page_next[parity1];
        tb_invalidate(tb, parity2);
        tb = tb_next;
    }
    p->first_tb = NULL;
}
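
/* Editor's note: a TB can span two consecutive target pages, so it may
   be on two page lists at once; page_next[] is indexed by the parity
   (bit 0) of the page index so the two links never collide. The loop
   above follows this page's links (parity1) and has tb_invalidate()
   unlink each TB only from the *other* page's list (parity2), because
   this page's whole list is dropped by the final p->first_tb = NULL. */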

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb, unsigned int page_index)
{
    PageDesc *p;
    unsigned long host_start, host_end, addr, page_addr;
    int prot;

    p = page_find_alloc(page_index);
    tb->page_next[page_index & 1] = p->first_tb;
    p->first_tb = tb;
    if (p->flags & PAGE_WRITE) {
        /* force the host page to be non-writable (writes will incur a
           page fault + mprotect overhead) */
        page_addr = (page_index << TARGET_PAGE_BITS);
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
#ifdef DEBUG_TB_CHECK
        tb_page_check();
#endif
    }
}

/* Allocate a new translation block. Return NULL if there are too many
   translation blocks or too much generated code; the caller is then
   expected to flush with tb_flush() and retry. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    return tb;
}
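
/* Editor's sketch of the expected call sequence (reconstructed from
   this file; the real translator entry point lives elsewhere and the
   code generation step is elided): */
#if 0
static TranslationBlock *example_translate(unsigned long pc)
{
    TranslationBlock *tb = tb_alloc(pc);
    if (!tb) {              /* TB table or code buffer full */
        tb_flush();         /* recycle everything... */
        tb = tb_alloc(pc);  /* ...so this cannot fail */
    }
    tb->tc_ptr = code_gen_ptr;
    /* ... generate host code at code_gen_ptr, set tb->size and
       tb->tb_next_offset[], advance code_gen_ptr ... */
    tb_link(tb);            /* enter the TB into the page lists */
    return tb;
}
#endif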

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
    unsigned int page_index1, page_index2;

    /* add the TB to the page lists */
    page_index1 = tb->pc >> TARGET_PAGE_BITS;
    tb_alloc_page(tb, page_index1);
    page_index2 = (tb->pc + tb->size - 1) >> TARGET_PAGE_BITS;
    if (page_index2 != page_index1) {
        tb_alloc_page(tb, page_index2);
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* called from the signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) | PAGE_WRITE);
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        p1[pindex].flags |= PAGE_WRITE;
        /* and since the content will be modified, we must invalidate
           the corresponding translated code. */
        tb_invalidate_page(address);
#ifdef DEBUG_TB_CHECK
        tb_invalidate_check(address);
#endif
        return 1;
    } else {
        return 0;
    }
}
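
/* Editor's sketch (hypothetical): the host SIGSEGV handler is expected
   to try page_unprotect() first and only report a real guest fault
   when it returns 0: */
#if 0
static int example_handle_write_fault(unsigned long host_addr)
{
    if (page_unprotect(host_addr))
        return 1; /* our write protection: code invalidated, restart the write */
    return 0;     /* genuine fault: must be forwarded to the guest */
}
#endif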

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr);
    }
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
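
/* Editor's note: the binary search is valid because tbs[] is filled in
   allocation order and tb_alloc()/code_gen_ptr advance together, so
   tc_ptr values are monotonically increasing. A typical (hypothetical)
   use is mapping a faulting host PC from a signal context back to the
   guest PC of the enclosing TB: */
#if 0
static unsigned long example_guest_pc(unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    return tb ? tb->pc : 0; /* 0 if host_pc is outside generated code */
}
#endif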

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find the head of the list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list of tb_next */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* reset the jump to the next tb in the generated code */
        tb_reset_jump(tb, n);

        /* recursively reset the jumps of the TB we could have jumped to */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;
    tb_invalidate_page(pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        tb_flush();
    }
#endif
}


/* mask must never be zero */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb) {
        tb_reset_jump_recursive(tb);
    }
}
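
/* Editor's note: setting interrupt_request alone is not enough while
   the CPU is inside generated code, because chained TBs branch
   directly to one another without returning to the main loop.
   tb_reset_jump_recursive() rewrites those direct branches back to
   their "return to the loop" stubs, so execution leaves the current
   chain at the next TB boundary and the interrupt is noticed there. */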


void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#ifdef TARGET_I386
/* unmap all mapped pages and flush all associated code */
void page_unmap(void)
{
    PageDesc *p, *pmap;
    unsigned long addr;
    int i, j, ret, j1;

    for(i = 0; i < L1_SIZE; i++) {
        pmap = l1_map[i];
        if (pmap) {
            p = pmap;
            for(j = 0; j < L2_SIZE;) {
                if (p->flags & PAGE_VALID) {
                    addr = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                    /* we try to find a range so as to make fewer syscalls */
                    j1 = j;
                    p++;
                    j++;
                    while (j < L2_SIZE && (p->flags & PAGE_VALID)) {
                        p++;
                        j++;
                    }
                    ret = munmap((void *)addr, (j - j1) << TARGET_PAGE_BITS);
                    if (ret != 0) {
                        fprintf(stderr, "Could not unmap page 0x%08lx\n", addr);
                        exit(1);
                    }
                } else {
                    p++;
                    j++;
                }
            }
            free(pmap);
            l1_map[i] = NULL;
        }
    }
    tb_flush();
}
#endif

void tlb_flush(CPUState *env)
{
#if defined(TARGET_I386)
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }
#endif
}

void tlb_flush_page(CPUState *env, uint32_t addr)
{
#if defined(TARGET_I386)
    int i;

    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->tlb_read[0][i].address = -1;
    env->tlb_write[0][i].address = -1;
    env->tlb_read[1][i].address = -1;
    env->tlb_write[1][i].address = -1;
#endif
}

static inline unsigned long *physpage_find_alloc(unsigned int page)
{
    unsigned long **lp, *p;
    unsigned int index, i;

    index = page >> TARGET_PAGE_BITS;
    lp = &l1_physmap[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = malloc(sizeof(unsigned long) * L2_SIZE);
        for(i = 0; i < L2_SIZE; i++)
            p[i] = IO_MEM_UNASSIGNED;
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

/* return IO_MEM_UNASSIGNED if no page is defined (unused memory) */
unsigned long physpage_find(unsigned long page)
{
    unsigned long *p;
    unsigned int index;
    index = page >> TARGET_PAGE_BITS;
    p = l1_physmap[index >> L2_BITS];
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p[index & (L2_SIZE - 1)];
}

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
                                  long phys_offset)
{
    unsigned long addr, end_addr;
    unsigned long *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = physpage_find_alloc(addr);
        *p = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) == 0)
            phys_offset += TARGET_PAGE_SIZE;
    }
}
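
/* Hypothetical usage sketch (editor's addition): registering plain RAM
   at guest physical address 0. Since the low bits of phys_offset are
   zero, the loop above increments it page by page, mapping consecutive
   guest pages to consecutive RAM offsets: */
#if 0
static void example_register_ram(unsigned long ram_size)
{
    cpu_register_physical_memory(0, ram_size, 0 /* RAM, offset 0 */);
}
#endif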

static uint32_t unassigned_mem_readb(uint32_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};


static void io_mem_init(void)
{
    io_mem_nb = 1;
    cpu_register_io_memory(0, unassigned_mem_read, unassigned_mem_write);
}

/* mem_read and mem_write are arrays of functions containing the
   functions to access bytes (index 0), words (index 1) and dwords
   (index 2). All functions must be supplied. If io_index is non-zero,
   the corresponding io zone is modified. If it is zero, a new io zone
   is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        /* allocate a new io zone; fail if the table is full */
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}
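
/* Hypothetical usage sketch (editor's addition): allocating a new io
   zone and mapping it at an arbitrary physical address (0xfe000000 and
   the handler-table parameters are placeholders; any byte/word/dword
   triples of CPUReadMemoryFunc/CPUWriteMemoryFunc would do): */
#if 0
static void example_register_device(CPUReadMemoryFunc **my_mem_read,
                                    CPUWriteMemoryFunc **my_mem_write)
{
    int io = cpu_register_io_memory(0, my_mem_read, my_mem_write);
    if (io >= 0)
        cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif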