54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <stdlib.h>
21#include <stdio.h>
22#include <stdarg.h>
23#include <string.h>
24#include <errno.h>
25#include <unistd.h>
26#include <inttypes.h>
fd6ce8f6 27#include <sys/mman.h>
54936004 28
ea041c0e 29#include "config.h"
30#include "cpu.h"
31#include "exec-all.h"
54936004 32
fd6ce8f6 33//#define DEBUG_TB_INVALIDATE
66e85a21 34//#define DEBUG_FLUSH
9fa3e853 35//#define DEBUG_TLB
36
37/* make various TB consistency checks */
38//#define DEBUG_TB_CHECK
98857888 39//#define DEBUG_TLB_CHECK
40
41/* threshold to flush the translated code buffer */
42#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
43
44#define SMC_BITMAP_USE_THRESHOLD 10
45
46#define MMAP_AREA_START 0x00000000
47#define MMAP_AREA_END 0xa8000000
48
49TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
50TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
9fa3e853 51TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
fd6ce8f6 52int nb_tbs;
53/* any access to the tbs or the page table must use this lock */
54spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
55
56uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
57uint8_t *code_gen_ptr;
58
59int phys_ram_size;
60int phys_ram_fd;
61uint8_t *phys_ram_base;
62
54936004 63typedef struct PageDesc {
64 /* offset in memory of the page + io_index in the low 12 bits */
65 unsigned long phys_offset;
66 /* list of TBs intersecting this physical page */
fd6ce8f6 67 TranslationBlock *first_tb;
 64 /* in order to optimize self modifying code, we count the number
 65 of write accesses to a given page; past a threshold a bitmap is used */
70 unsigned int code_write_count;
71 uint8_t *code_bitmap;
72#if defined(CONFIG_USER_ONLY)
73 unsigned long flags;
74#endif
75} PageDesc;
76
77typedef struct VirtPageDesc {
78 /* physical address of code page. It is valid only if 'valid_tag'
79 matches 'virt_valid_tag' */
80 target_ulong phys_addr;
81 unsigned int valid_tag;
82#if !defined(CONFIG_SOFTMMU)
83 /* original page access rights. It is valid only if 'valid_tag'
84 matches 'virt_valid_tag' */
85 unsigned int prot;
86#endif
87} VirtPageDesc;
88
89#define L2_BITS 10
90#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
91
92#define L1_SIZE (1 << L1_BITS)
93#define L2_SIZE (1 << L2_BITS)
94
33417e70 95static void io_mem_init(void);
fd6ce8f6 96
97unsigned long real_host_page_size;
98unsigned long host_page_bits;
99unsigned long host_page_size;
100unsigned long host_page_mask;
101
102static PageDesc *l1_map[L1_SIZE];
103
104#if !defined(CONFIG_USER_ONLY)
105static VirtPageDesc *l1_virt_map[L1_SIZE];
106static unsigned int virt_valid_tag;
107#endif
108
33417e70 109/* io memory support */
110CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
111CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
112static int io_mem_nb;
113
114/* log support */
115char *logfilename = "/tmp/qemu.log";
116FILE *logfile;
117int loglevel;
118
b346ff46 119static void page_init(void)
120{
 121 /* NOTE: we can always assume that host_page_size >=
122 TARGET_PAGE_SIZE */
123 real_host_page_size = getpagesize();
124 if (host_page_size == 0)
125 host_page_size = real_host_page_size;
126 if (host_page_size < TARGET_PAGE_SIZE)
127 host_page_size = TARGET_PAGE_SIZE;
128 host_page_bits = 0;
129 while ((1 << host_page_bits) < host_page_size)
130 host_page_bits++;
131 host_page_mask = ~(host_page_size - 1);
132#if !defined(CONFIG_USER_ONLY)
133 virt_valid_tag = 1;
134#endif
135}
136
fd6ce8f6 137static inline PageDesc *page_find_alloc(unsigned int index)
54936004 138{
139 PageDesc **lp, *p;
140
141 lp = &l1_map[index >> L2_BITS];
142 p = *lp;
143 if (!p) {
144 /* allocate if not found */
145 p = malloc(sizeof(PageDesc) * L2_SIZE);
fd6ce8f6 146 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
147 *lp = p;
148 }
149 return p + (index & (L2_SIZE - 1));
150}
151
fd6ce8f6 152static inline PageDesc *page_find(unsigned int index)
54936004 153{
154 PageDesc *p;
155
156 p = l1_map[index >> L2_BITS];
157 if (!p)
158 return 0;
159 return p + (index & (L2_SIZE - 1));
160}
161
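/*
 * Illustrative sketch, not part of the original file: how the two-level
 * l1_map lookup above decomposes a target address.  The worked numbers
 * assume TARGET_PAGE_BITS == 12 (4 KB target pages, as on the i386
 * target); the helper name is hypothetical.
 */
#if 0
static PageDesc *example_page_lookup(unsigned long target_addr)
{
    /* e.g. target_addr = 0x08049123:
     *   page index = 0x08049123 >> 12        = 0x08049
     *   l1 slot    = 0x08049 >> L2_BITS      = 0x20
     *   l2 entry   = 0x08049 & (L2_SIZE - 1) = 0x49
     * and the remaining 0x123 is the offset inside the page */
    unsigned int index = target_addr >> TARGET_PAGE_BITS;
    PageDesc *p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;        /* second-level table not allocated yet */
    return p + (index & (L2_SIZE - 1));
}
#endif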
162#if !defined(CONFIG_USER_ONLY)
163static void tlb_protect_code(CPUState *env, uint32_t addr);
164static void tlb_unprotect_code(CPUState *env, uint32_t addr);
165static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr);
166
167static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
fd6ce8f6 168{
9fa3e853 169 VirtPageDesc **lp, *p;
fd6ce8f6 170
171 lp = &l1_virt_map[index >> L2_BITS];
172 p = *lp;
173 if (!p) {
174 /* allocate if not found */
175 p = malloc(sizeof(VirtPageDesc) * L2_SIZE);
176 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
177 *lp = p;
178 }
179 return p + (index & (L2_SIZE - 1));
180}
181
182static inline VirtPageDesc *virt_page_find(unsigned int index)
183{
184 VirtPageDesc *p;
185
186 p = l1_virt_map[index >> L2_BITS];
187 if (!p)
188 return 0;
9fa3e853 189 return p + (index & (L2_SIZE - 1));
190}
191
9fa3e853 192static void virt_page_flush(void)
54936004 193{
194 int i, j;
195 VirtPageDesc *p;
196
197 virt_valid_tag++;
198
199 if (virt_valid_tag == 0) {
200 virt_valid_tag = 1;
201 for(i = 0; i < L1_SIZE; i++) {
202 p = l1_virt_map[i];
203 if (p) {
204 for(j = 0; j < L2_SIZE; j++)
205 p[j].valid_tag = 0;
206 }
fd6ce8f6 207 }
208 }
209}
210#else
211static void virt_page_flush(void)
212{
213}
214#endif
fd6ce8f6 215
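/*
 * Note added for clarity, not in the original source: virt_valid_tag acts
 * as a generation counter.  virt_page_flush() invalidates every
 * VirtPageDesc in O(1) simply by bumping the counter; an entry is only
 * trusted while its stored tag matches the current value, and the explicit
 * clearing loop above only runs when the counter wraps around to 0.  A
 * hypothetical validity test:
 */
#if 0
static inline int example_virt_page_is_valid(const VirtPageDesc *vp)
{
    /* stale entries still carry an older tag and therefore compare unequal */
    return vp != NULL && vp->valid_tag == virt_valid_tag;
}
#endif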
b346ff46 216void cpu_exec_init(void)
217{
218 if (!code_gen_ptr) {
219 code_gen_ptr = code_gen_buffer;
b346ff46 220 page_init();
33417e70 221 io_mem_init();
222 }
223}
224
225static inline void invalidate_page_bitmap(PageDesc *p)
226{
227 if (p->code_bitmap) {
228 free(p->code_bitmap);
229 p->code_bitmap = NULL;
230 }
231 p->code_write_count = 0;
232}
233
234/* set to NULL all the 'first_tb' fields in all PageDescs */
235static void page_flush_tb(void)
236{
237 int i, j;
238 PageDesc *p;
239
240 for(i = 0; i < L1_SIZE; i++) {
241 p = l1_map[i];
242 if (p) {
243 for(j = 0; j < L2_SIZE; j++) {
244 p->first_tb = NULL;
245 invalidate_page_bitmap(p);
246 p++;
247 }
248 }
249 }
250}
251
252/* flush all the translation blocks */
d4e8164f 253/* XXX: tb_flush is currently not thread safe */
0124311e 254void tb_flush(CPUState *env)
255{
256 int i;
0124311e 257#if defined(DEBUG_FLUSH)
258 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
259 code_gen_ptr - code_gen_buffer,
260 nb_tbs,
0124311e 261 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
fd6ce8f6 262#endif
263 /* must reset current TB so that interrupts cannot modify the
264 links while we are modifying them */
265 env->current_tb = NULL;
266
267 nb_tbs = 0;
268 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
269 tb_hash[i] = NULL;
270 virt_page_flush();
271
272 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
273 tb_phys_hash[i] = NULL;
fd6ce8f6 274 page_flush_tb();
9fa3e853 275
fd6ce8f6 276 code_gen_ptr = code_gen_buffer;
277 /* XXX: flush processor icache at this point if cache flush is
278 expensive */
279}
280
281#ifdef DEBUG_TB_CHECK
282
283static void tb_invalidate_check(unsigned long address)
284{
285 TranslationBlock *tb;
286 int i;
287 address &= TARGET_PAGE_MASK;
288 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
289 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
290 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
291 address >= tb->pc + tb->size)) {
292 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
293 address, tb->pc, tb->size);
294 }
295 }
296 }
297}
298
299/* verify that all the pages have correct rights for code */
300static void tb_page_check(void)
301{
302 TranslationBlock *tb;
303 int i, flags1, flags2;
304
305 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
306 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
307 flags1 = page_get_flags(tb->pc);
308 flags2 = page_get_flags(tb->pc + tb->size - 1);
309 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
310 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
311 tb->pc, tb->size, flags1, flags2);
312 }
313 }
314 }
315}
316
317void tb_jmp_check(TranslationBlock *tb)
318{
319 TranslationBlock *tb1;
320 unsigned int n1;
321
322 /* suppress any remaining jumps to this TB */
323 tb1 = tb->jmp_first;
324 for(;;) {
325 n1 = (long)tb1 & 3;
326 tb1 = (TranslationBlock *)((long)tb1 & ~3);
327 if (n1 == 2)
328 break;
329 tb1 = tb1->jmp_next[n1];
330 }
331 /* check end of list */
332 if (tb1 != tb) {
333 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
334 }
335}
336
337#endif
338
339/* invalidate one TB */
340static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
341 int next_offset)
342{
343 TranslationBlock *tb1;
344 for(;;) {
345 tb1 = *ptb;
346 if (tb1 == tb) {
347 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
348 break;
349 }
350 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
351 }
352}
353
354static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
355{
356 TranslationBlock *tb1;
357 unsigned int n1;
358
359 for(;;) {
360 tb1 = *ptb;
361 n1 = (long)tb1 & 3;
362 tb1 = (TranslationBlock *)((long)tb1 & ~3);
363 if (tb1 == tb) {
364 *ptb = tb1->page_next[n1];
365 break;
366 }
367 ptb = &tb1->page_next[n1];
368 }
369}
370
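/*
 * Illustrative sketch, not part of the original file: the page_next lists
 * walked above store the page index 'n' (0 or 1, since a TB spans at most
 * two target pages) in the two low bits of the TranslationBlock pointer;
 * those bits are free because the structures are at least 4-byte aligned.
 * The jmp_first list reuses the same trick and marks its head with tag 2.
 * Hypothetical encode/decode helpers:
 */
#if 0
static inline TranslationBlock *example_tb_tag(TranslationBlock *tb, int n)
{
    return (TranslationBlock *)((long)tb | n);      /* n is 0..3 */
}

static inline TranslationBlock *example_tb_untag(TranslationBlock *tagged,
                                                 int *n)
{
    *n = (long)tagged & 3;                          /* recover the tag */
    return (TranslationBlock *)((long)tagged & ~3);
}
#endif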
371static inline void tb_jmp_remove(TranslationBlock *tb, int n)
372{
373 TranslationBlock *tb1, **ptb;
374 unsigned int n1;
375
376 ptb = &tb->jmp_next[n];
377 tb1 = *ptb;
378 if (tb1) {
379 /* find tb(n) in circular list */
380 for(;;) {
381 tb1 = *ptb;
382 n1 = (long)tb1 & 3;
383 tb1 = (TranslationBlock *)((long)tb1 & ~3);
384 if (n1 == n && tb1 == tb)
385 break;
386 if (n1 == 2) {
387 ptb = &tb1->jmp_first;
388 } else {
389 ptb = &tb1->jmp_next[n1];
390 }
391 }
392 /* now we can suppress tb(n) from the list */
393 *ptb = tb->jmp_next[n];
394
395 tb->jmp_next[n] = NULL;
396 }
397}
398
399/* reset the jump entry 'n' of a TB so that it is not chained to
400 another TB */
401static inline void tb_reset_jump(TranslationBlock *tb, int n)
402{
403 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
404}
405
9fa3e853 406static inline void tb_invalidate(TranslationBlock *tb)
fd6ce8f6 407{
d4e8164f 408 unsigned int h, n1;
9fa3e853 409 TranslationBlock *tb1, *tb2, **ptb;
d4e8164f 410
411 tb_invalidated_flag = 1;
412
413 /* remove the TB from the hash list */
414 h = tb_hash_func(tb->pc);
415 ptb = &tb_hash[h];
416 for(;;) {
417 tb1 = *ptb;
 418 /* NOTE: the TB is not necessarily linked in the hash; if it is
 419 not found there, it is simply not currently in use */
420 if (tb1 == NULL)
421 return;
422 if (tb1 == tb) {
423 *ptb = tb1->hash_next;
424 break;
425 }
426 ptb = &tb1->hash_next;
fd6ce8f6 427 }
428
429 /* suppress this TB from the two jump lists */
430 tb_jmp_remove(tb, 0);
431 tb_jmp_remove(tb, 1);
432
433 /* suppress any remaining jumps to this TB */
434 tb1 = tb->jmp_first;
435 for(;;) {
436 n1 = (long)tb1 & 3;
437 if (n1 == 2)
438 break;
439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
440 tb2 = tb1->jmp_next[n1];
441 tb_reset_jump(tb1, n1);
442 tb1->jmp_next[n1] = NULL;
443 tb1 = tb2;
444 }
445 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
446}
447
9fa3e853 448static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
fd6ce8f6 449{
fd6ce8f6 450 PageDesc *p;
451 unsigned int h;
452 target_ulong phys_pc;
453
454 /* remove the TB from the hash list */
455 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
456 h = tb_phys_hash_func(phys_pc);
457 tb_remove(&tb_phys_hash[h], tb,
458 offsetof(TranslationBlock, phys_hash_next));
459
460 /* remove the TB from the page list */
461 if (tb->page_addr[0] != page_addr) {
462 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
463 tb_page_remove(&p->first_tb, tb);
464 invalidate_page_bitmap(p);
465 }
466 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
467 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
468 tb_page_remove(&p->first_tb, tb);
469 invalidate_page_bitmap(p);
470 }
471
472 tb_invalidate(tb);
473}
474
475static inline void set_bits(uint8_t *tab, int start, int len)
476{
477 int end, mask, end1;
478
479 end = start + len;
480 tab += start >> 3;
481 mask = 0xff << (start & 7);
482 if ((start & ~7) == (end & ~7)) {
483 if (start < end) {
484 mask &= ~(0xff << (end & 7));
485 *tab |= mask;
486 }
487 } else {
488 *tab++ |= mask;
489 start = (start + 8) & ~7;
490 end1 = end & ~7;
491 while (start < end1) {
492 *tab++ = 0xff;
493 start += 8;
494 }
495 if (start < end) {
496 mask = ~(0xff << (end & 7));
497 *tab |= mask;
498 }
499 }
500}
501
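/*
 * Worked example added for clarity, not in the original source: the SMC
 * bitmap holds one bit per byte of a target page.  set_bits(bitmap, 10, 4)
 * marks bytes 10..13: tab is advanced to byte 1, the initial mask is
 * 0xff << 2 (0xfc in the low byte), and since start and end fall in the
 * same byte it is trimmed with ~(0xff << 6), leaving 0x3c, i.e. bits 2..5
 * of bitmap[1].
 */
#if 0
uint8_t bitmap[TARGET_PAGE_SIZE / 8] = { 0 };
set_bits(bitmap, 10, 4);            /* bitmap[1] now reads 0x3c */
#endif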
502static void build_page_bitmap(PageDesc *p)
503{
504 int n, tb_start, tb_end;
505 TranslationBlock *tb;
506
507 p->code_bitmap = malloc(TARGET_PAGE_SIZE / 8);
508 if (!p->code_bitmap)
509 return;
510 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
511
512 tb = p->first_tb;
513 while (tb != NULL) {
514 n = (long)tb & 3;
515 tb = (TranslationBlock *)((long)tb & ~3);
516 /* NOTE: this is subtle as a TB may span two physical pages */
517 if (n == 0) {
518 /* NOTE: tb_end may be after the end of the page, but
519 it is not a problem */
520 tb_start = tb->pc & ~TARGET_PAGE_MASK;
521 tb_end = tb_start + tb->size;
522 if (tb_end > TARGET_PAGE_SIZE)
523 tb_end = TARGET_PAGE_SIZE;
524 } else {
525 tb_start = 0;
526 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
527 }
528 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
529 tb = tb->page_next[n];
530 }
531}
532
533/* invalidate all TBs which intersect with the target physical page
534 starting in range [start;end[. NOTE: start and end must refer to
535 the same physical page */
536static void tb_invalidate_phys_page_range(target_ulong start, target_ulong end)
537{
538 int n;
539 PageDesc *p;
540 TranslationBlock *tb, *tb_next;
541 target_ulong tb_start, tb_end;
542
543 p = page_find(start >> TARGET_PAGE_BITS);
544 if (!p)
545 return;
546 if (!p->code_bitmap &&
547 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
548 /* build code bitmap */
549 build_page_bitmap(p);
550 }
551
552 /* we remove all the TBs in the range [start, end[ */
553 /* XXX: see if in some cases it could be faster to invalidate all the code */
554 tb = p->first_tb;
555 while (tb != NULL) {
556 n = (long)tb & 3;
557 tb = (TranslationBlock *)((long)tb & ~3);
558 tb_next = tb->page_next[n];
559 /* NOTE: this is subtle as a TB may span two physical pages */
560 if (n == 0) {
561 /* NOTE: tb_end may be after the end of the page, but
562 it is not a problem */
563 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
564 tb_end = tb_start + tb->size;
565 } else {
566 tb_start = tb->page_addr[1];
567 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
568 }
569 if (!(tb_end <= start || tb_start >= end)) {
570 tb_phys_invalidate(tb, -1);
571 }
572 tb = tb_next;
573 }
574#if !defined(CONFIG_USER_ONLY)
575 /* if no code remaining, no need to continue to use slow writes */
576 if (!p->first_tb) {
577 invalidate_page_bitmap(p);
578 tlb_unprotect_code_phys(cpu_single_env, start);
579 }
fd6ce8f6 580#endif
9fa3e853 581}
fd6ce8f6 582
583/* len must be <= 8 and start must be a multiple of len */
584static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
585{
586 PageDesc *p;
587 int offset, b;
588
589 p = page_find(start >> TARGET_PAGE_BITS);
590 if (!p)
591 return;
592 if (p->code_bitmap) {
593 offset = start & ~TARGET_PAGE_MASK;
594 b = p->code_bitmap[offset >> 3] >> (offset & 7);
595 if (b & ((1 << len) - 1))
596 goto do_invalidate;
597 } else {
598 do_invalidate:
599 tb_invalidate_phys_page_range(start, start + len);
600 }
601}
602
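/*
 * Worked example added for clarity, not in the original source: for a
 * 4-byte store at page offset 0x14 the check above reads
 * code_bitmap[0x14 >> 3] = code_bitmap[2], shifts it right by
 * (0x14 & 7) = 4 and tests the low len = 4 bits; only if one of the four
 * covered bytes belongs to translated code does it fall through to the
 * slower tb_invalidate_phys_page_range() path.
 */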
603/* invalidate all TBs which intersect with the target virtual page
604 starting in range [start;end[. This function is usually used when
605 the target processor flushes its I-cache. NOTE: start and end must
606 refer to the same physical page */
607void tb_invalidate_page_range(target_ulong start, target_ulong end)
608{
609 int n;
610 PageDesc *p;
611 TranslationBlock *tb, *tb_next;
612 target_ulong pc;
613 target_ulong phys_start;
614
615#if !defined(CONFIG_USER_ONLY)
616 {
617 VirtPageDesc *vp;
618 vp = virt_page_find(start >> TARGET_PAGE_BITS);
619 if (!vp)
620 return;
621 if (vp->valid_tag != virt_valid_tag)
622 return;
623 phys_start = vp->phys_addr + (start & ~TARGET_PAGE_MASK);
624 }
625#else
626 phys_start = start;
627#endif
628 p = page_find(phys_start >> TARGET_PAGE_BITS);
629 if (!p)
fd6ce8f6 630 return;
631 /* we remove all the TBs in the range [start, end[ */
632 /* XXX: see if in some cases it could be faster to invalidate all the code */
fd6ce8f6 633 tb = p->first_tb;
fd6ce8f6 634 while (tb != NULL) {
635 n = (long)tb & 3;
636 tb = (TranslationBlock *)((long)tb & ~3);
637 tb_next = tb->page_next[n];
638 pc = tb->pc;
639 if (!((pc + tb->size) <= start || pc >= end)) {
640 tb_phys_invalidate(tb, -1);
641 }
642 tb = tb_next;
643 }
644#if !defined(CONFIG_USER_ONLY)
645 /* if no code remaining, no need to continue to use slow writes */
646 if (!p->first_tb)
647 tlb_unprotect_code(cpu_single_env, start);
648#endif
649}
650
651#if !defined(CONFIG_SOFTMMU)
652static void tb_invalidate_phys_page(target_ulong addr)
653{
654 int n;
655 PageDesc *p;
656 TranslationBlock *tb;
657
658 addr &= TARGET_PAGE_MASK;
659 p = page_find(addr >> TARGET_PAGE_BITS);
660 if (!p)
661 return;
662 tb = p->first_tb;
663 while (tb != NULL) {
664 n = (long)tb & 3;
665 tb = (TranslationBlock *)((long)tb & ~3);
666 tb_phys_invalidate(tb, addr);
667 tb = tb->page_next[n];
668 }
669 p->first_tb = NULL;
670}
9fa3e853 671#endif
672
673/* add the tb in the target page and protect it if necessary */
674static inline void tb_alloc_page(TranslationBlock *tb,
675 unsigned int n, unsigned int page_addr)
676{
677 PageDesc *p;
678 TranslationBlock *last_first_tb;
679
680 tb->page_addr[n] = page_addr;
681 p = page_find(page_addr >> TARGET_PAGE_BITS);
682 tb->page_next[n] = p->first_tb;
683 last_first_tb = p->first_tb;
684 p->first_tb = (TranslationBlock *)((long)tb | n);
685 invalidate_page_bitmap(p);
fd6ce8f6 686
9fa3e853 687#if defined(CONFIG_USER_ONLY)
fd6ce8f6 688 if (p->flags & PAGE_WRITE) {
689 unsigned long host_start, host_end, addr;
690 int prot;
691
692 /* force the host page as non writable (writes will have a
693 page fault + mprotect overhead) */
694 host_start = page_addr & host_page_mask;
695 host_end = host_start + host_page_size;
696 prot = 0;
697 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
698 prot |= page_get_flags(addr);
699 mprotect((void *)host_start, host_page_size,
700 (prot & PAGE_BITS) & ~PAGE_WRITE);
701#ifdef DEBUG_TB_INVALIDATE
702 printf("protecting code page: 0x%08lx\n",
703 host_start);
704#endif
705 p->flags &= ~PAGE_WRITE;
fd6ce8f6 706 }
707#else
708 /* if some code is already present, then the pages are already
709 protected. So we handle the case where only the first TB is
710 allocated in a physical page */
711 if (!last_first_tb) {
712 target_ulong virt_addr;
713
714 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
715 tlb_protect_code(cpu_single_env, virt_addr);
716 }
717#endif
718}
719
720/* Allocate a new translation block. Flush the translation buffer if
721 too many translation blocks or too much generated code. */
d4e8164f 722TranslationBlock *tb_alloc(unsigned long pc)
723{
724 TranslationBlock *tb;
725
726 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
727 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
d4e8164f 728 return NULL;
729 tb = &tbs[nb_tbs++];
730 tb->pc = pc;
731 return tb;
732}
733
734/* add a new TB and link it to the physical page tables. phys_page2 is
735 (-1) to indicate that only one page contains the TB. */
736void tb_link_phys(TranslationBlock *tb,
737 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 738{
739 unsigned int h;
740 TranslationBlock **ptb;
741
742 /* add in the physical hash table */
743 h = tb_phys_hash_func(phys_pc);
744 ptb = &tb_phys_hash[h];
745 tb->phys_hash_next = *ptb;
746 *ptb = tb;
747
748 /* add in the page list */
749 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
750 if (phys_page2 != -1)
751 tb_alloc_page(tb, 1, phys_page2);
752 else
753 tb->page_addr[1] = -1;
754#ifdef DEBUG_TB_CHECK
755 tb_page_check();
756#endif
757}
758
759/* link the tb with the other TBs */
760void tb_link(TranslationBlock *tb)
761{
762#if !defined(CONFIG_USER_ONLY)
763 {
764 VirtPageDesc *vp;
765 target_ulong addr;
766
767 /* save the code memory mappings (needed to invalidate the code) */
768 addr = tb->pc & TARGET_PAGE_MASK;
769 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
770#ifdef DEBUG_TLB_CHECK
771 if (vp->valid_tag == virt_valid_tag &&
772 vp->phys_addr != tb->page_addr[0]) {
773 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
774 addr, tb->page_addr[0], vp->phys_addr);
775 }
776#endif
777 vp->phys_addr = tb->page_addr[0];
778 vp->valid_tag = virt_valid_tag;
779
780 if (tb->page_addr[1] != -1) {
781 addr += TARGET_PAGE_SIZE;
782 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
783#ifdef DEBUG_TLB_CHECK
784 if (vp->valid_tag == virt_valid_tag &&
785 vp->phys_addr != tb->page_addr[1]) {
786 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
787 addr, tb->page_addr[1], vp->phys_addr);
788 }
789#endif
790 vp->phys_addr = tb->page_addr[1];
791 vp->valid_tag = virt_valid_tag;
792 }
793 }
794#endif
795
796 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
797 tb->jmp_next[0] = NULL;
798 tb->jmp_next[1] = NULL;
799
800 /* init original jump addresses */
801 if (tb->tb_next_offset[0] != 0xffff)
802 tb_reset_jump(tb, 0);
803 if (tb->tb_next_offset[1] != 0xffff)
804 tb_reset_jump(tb, 1);
805}
806
807/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
808 tb[1].tc_ptr. Return NULL if not found */
809TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 810{
811 int m_min, m_max, m;
812 unsigned long v;
813 TranslationBlock *tb;
814
815 if (nb_tbs <= 0)
816 return NULL;
817 if (tc_ptr < (unsigned long)code_gen_buffer ||
818 tc_ptr >= (unsigned long)code_gen_ptr)
819 return NULL;
820 /* binary search (cf Knuth) */
821 m_min = 0;
822 m_max = nb_tbs - 1;
823 while (m_min <= m_max) {
824 m = (m_min + m_max) >> 1;
825 tb = &tbs[m];
826 v = (unsigned long)tb->tc_ptr;
827 if (v == tc_ptr)
828 return tb;
829 else if (tc_ptr < v) {
830 m_max = m - 1;
831 } else {
832 m_min = m + 1;
833 }
834 }
835 return &tbs[m_max];
836}
7501267e 837
838static void tb_reset_jump_recursive(TranslationBlock *tb);
839
840static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
841{
842 TranslationBlock *tb1, *tb_next, **ptb;
843 unsigned int n1;
844
845 tb1 = tb->jmp_next[n];
846 if (tb1 != NULL) {
847 /* find head of list */
848 for(;;) {
849 n1 = (long)tb1 & 3;
850 tb1 = (TranslationBlock *)((long)tb1 & ~3);
851 if (n1 == 2)
852 break;
853 tb1 = tb1->jmp_next[n1];
854 }
 855 /* we are now sure that tb jumps to tb1 */
856 tb_next = tb1;
857
858 /* remove tb from the jmp_first list */
859 ptb = &tb_next->jmp_first;
860 for(;;) {
861 tb1 = *ptb;
862 n1 = (long)tb1 & 3;
863 tb1 = (TranslationBlock *)((long)tb1 & ~3);
864 if (n1 == n && tb1 == tb)
865 break;
866 ptb = &tb1->jmp_next[n1];
867 }
868 *ptb = tb->jmp_next[n];
869 tb->jmp_next[n] = NULL;
870
871 /* suppress the jump to next tb in generated code */
872 tb_reset_jump(tb, n);
873
0124311e 874 /* suppress jumps in the tb on which we could have jumped */
875 tb_reset_jump_recursive(tb_next);
876 }
877}
878
879static void tb_reset_jump_recursive(TranslationBlock *tb)
880{
881 tb_reset_jump_recursive2(tb, 0);
882 tb_reset_jump_recursive2(tb, 1);
883}
884
885/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
886 breakpoint is reached */
887int cpu_breakpoint_insert(CPUState *env, uint32_t pc)
888{
889#if defined(TARGET_I386)
890 int i;
891
892 for(i = 0; i < env->nb_breakpoints; i++) {
893 if (env->breakpoints[i] == pc)
894 return 0;
895 }
896
897 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
898 return -1;
899 env->breakpoints[env->nb_breakpoints++] = pc;
9fa3e853 900 tb_invalidate_page_range(pc, pc + 1);
901 return 0;
902#else
903 return -1;
904#endif
905}
906
907/* remove a breakpoint */
908int cpu_breakpoint_remove(CPUState *env, uint32_t pc)
909{
910#if defined(TARGET_I386)
911 int i;
912 for(i = 0; i < env->nb_breakpoints; i++) {
913 if (env->breakpoints[i] == pc)
914 goto found;
915 }
916 return -1;
917 found:
918 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
919 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
920 env->nb_breakpoints--;
9fa3e853 921 tb_invalidate_page_range(pc, pc + 1);
922 return 0;
923#else
924 return -1;
925#endif
926}
927
928/* enable or disable single step mode. EXCP_DEBUG is returned by the
929 CPU loop after each instruction */
930void cpu_single_step(CPUState *env, int enabled)
931{
932#if defined(TARGET_I386)
933 if (env->singlestep_enabled != enabled) {
934 env->singlestep_enabled = enabled;
 935 /* must flush all the translated code to avoid inconsistencies */
9fa3e853 936 /* XXX: only flush what is necessary */
0124311e 937 tb_flush(env);
938 }
939#endif
940}
941
 942/* enable or disable low level logging */
943void cpu_set_log(int log_flags)
944{
945 loglevel = log_flags;
946 if (loglevel && !logfile) {
947 logfile = fopen(logfilename, "w");
948 if (!logfile) {
949 perror(logfilename);
950 _exit(1);
951 }
952#if !defined(CONFIG_SOFTMMU)
953 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
954 {
955 static uint8_t logfile_buf[4096];
956 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
957 }
958#else
34865134 959 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 960#endif
961 }
962}
963
964void cpu_set_log_filename(const char *filename)
965{
966 logfilename = strdup(filename);
967}
c33a346e 968
0124311e 969/* mask must never be zero, except for A20 change call */
68a79315 970void cpu_interrupt(CPUState *env, int mask)
971{
972 TranslationBlock *tb;
973
974 env->interrupt_request |= mask;
975 /* if the cpu is currently executing code, we must unlink it and
976 all the potentially executing TB */
977 tb = env->current_tb;
978 if (tb) {
979 tb_reset_jump_recursive(tb);
980 }
981}
982
983
984void cpu_abort(CPUState *env, const char *fmt, ...)
985{
986 va_list ap;
987
988 va_start(ap, fmt);
989 fprintf(stderr, "qemu: fatal: ");
990 vfprintf(stderr, fmt, ap);
991 fprintf(stderr, "\n");
992#ifdef TARGET_I386
993 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
994#endif
995 va_end(ap);
996 abort();
997}
998
999#if !defined(CONFIG_USER_ONLY)
1000
1001void tlb_flush(CPUState *env)
1002{
33417e70 1003 int i;
0124311e 1004
1005#if defined(DEBUG_TLB)
1006 printf("tlb_flush:\n");
1007#endif
1008 /* must reset current TB so that interrupts cannot modify the
1009 links while we are modifying them */
1010 env->current_tb = NULL;
1011
1012 for(i = 0; i < CPU_TLB_SIZE; i++) {
1013 env->tlb_read[0][i].address = -1;
1014 env->tlb_write[0][i].address = -1;
1015 env->tlb_read[1][i].address = -1;
1016 env->tlb_write[1][i].address = -1;
1017 }
1018
1019 virt_page_flush();
1020 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1021 tb_hash[i] = NULL;
1022
1023#if !defined(CONFIG_SOFTMMU)
1024 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1025#endif
1026}
1027
1028static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
1029{
1030 if (addr == (tlb_entry->address &
1031 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1032 tlb_entry->address = -1;
1033}
1034
1035void tlb_flush_page(CPUState *env, uint32_t addr)
1036{
1037 int i, n;
1038 VirtPageDesc *vp;
1039 PageDesc *p;
1040 TranslationBlock *tb;
0124311e 1041
1042#if defined(DEBUG_TLB)
1043 printf("tlb_flush_page: 0x%08x\n", addr);
1044#endif
1045 /* must reset current TB so that interrupts cannot modify the
1046 links while we are modifying them */
1047 env->current_tb = NULL;
1048
1049 addr &= TARGET_PAGE_MASK;
1050 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1051 tlb_flush_entry(&env->tlb_read[0][i], addr);
1052 tlb_flush_entry(&env->tlb_write[0][i], addr);
1053 tlb_flush_entry(&env->tlb_read[1][i], addr);
1054 tlb_flush_entry(&env->tlb_write[1][i], addr);
0124311e 1055
 1056 /* remove from the virtual pc hash table all the TBs at this
1057 virtual address */
1058
1059 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1060 if (vp && vp->valid_tag == virt_valid_tag) {
1061 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1062 if (p) {
1063 /* we remove all the links to the TBs in this virtual page */
1064 tb = p->first_tb;
1065 while (tb != NULL) {
1066 n = (long)tb & 3;
1067 tb = (TranslationBlock *)((long)tb & ~3);
1068 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1069 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1070 tb_invalidate(tb);
1071 }
1072 tb = tb->page_next[n];
1073 }
1074 }
98857888 1075 vp->valid_tag = 0;
1076 }
1077
0124311e 1078#if !defined(CONFIG_SOFTMMU)
9fa3e853 1079 if (addr < MMAP_AREA_END)
0124311e 1080 munmap((void *)addr, TARGET_PAGE_SIZE);
61382a50 1081#endif
1082}
1083
1084static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1085{
1086 if (addr == (tlb_entry->address &
1087 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1088 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1089 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1090 tlb_entry->address |= IO_MEM_CODE;
1091 tlb_entry->addend -= (unsigned long)phys_ram_base;
1092 }
1093}
1094
1095/* update the TLBs so that writes to code in the virtual page 'addr'
1096 can be detected */
1097static void tlb_protect_code(CPUState *env, uint32_t addr)
1098{
1099 int i;
1100
1101 addr &= TARGET_PAGE_MASK;
1102 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1103 tlb_protect_code1(&env->tlb_write[0][i], addr);
1104 tlb_protect_code1(&env->tlb_write[1][i], addr);
1105#if !defined(CONFIG_SOFTMMU)
1106 /* NOTE: as we generated the code for this page, it is already at
1107 least readable */
1108 if (addr < MMAP_AREA_END)
1109 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1110#endif
1111}
1112
1113static inline void tlb_unprotect_code1(CPUTLBEntry *tlb_entry, uint32_t addr)
1114{
1115 if (addr == (tlb_entry->address &
1116 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1117 (tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE) {
1118 tlb_entry->address &= TARGET_PAGE_MASK;
1119 tlb_entry->addend += (unsigned long)phys_ram_base;
0124311e 1120 }
1121}
1122
1123/* update the TLB so that writes in virtual page 'addr' are no longer
 1124 tested for self-modifying code */
1125static void tlb_unprotect_code(CPUState *env, uint32_t addr)
61382a50 1126{
33417e70
FB
1127 int i;
1128
61382a50 1129 addr &= TARGET_PAGE_MASK;
33417e70 1130 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1131 tlb_unprotect_code1(&env->tlb_write[0][i], addr);
1132 tlb_unprotect_code1(&env->tlb_write[1][i], addr);
1133}
1134
1135static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1136 uint32_t phys_addr)
1137{
1138 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1139 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1140 tlb_entry->address &= TARGET_PAGE_MASK;
1141 tlb_entry->addend += (unsigned long)phys_ram_base;
1142 }
1143}
1144
1145/* update the TLB so that writes in physical page 'phys_addr' are no longer
 1146 tested for self-modifying code */
1147/* XXX: find a way to improve it */
1148static void tlb_unprotect_code_phys(CPUState *env, uint32_t phys_addr)
1149{
1150 int i;
1151
1152 phys_addr &= TARGET_PAGE_MASK;
1153 for(i = 0; i < CPU_TLB_SIZE; i++)
1154 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1155 for(i = 0; i < CPU_TLB_SIZE; i++)
1156 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1157}
1158
98857888 1159/* add a new TLB entry. At most one entry for a given virtual
1160 address is permitted. */
1161int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1162 int is_user, int is_softmmu)
1163{
1164 PageDesc *p;
1165 target_ulong pd;
1166 TranslationBlock *first_tb;
1167 unsigned int index;
1168 target_ulong address, addend;
1169 int ret;
1170
1171 p = page_find(paddr >> TARGET_PAGE_BITS);
1172 if (!p) {
1173 pd = IO_MEM_UNASSIGNED;
1174 first_tb = NULL;
1175 } else {
1176 pd = p->phys_offset;
1177 first_tb = p->first_tb;
1178 }
1179#if defined(DEBUG_TLB)
1180 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1181 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1182#endif
1183
1184 ret = 0;
1185#if !defined(CONFIG_SOFTMMU)
1186 if (is_softmmu)
1187#endif
1188 {
1189 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1190 /* IO memory case */
1191 address = vaddr | pd;
1192 addend = paddr;
1193 } else {
1194 /* standard memory */
1195 address = vaddr;
1196 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1197 }
1198
1199 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1200 addend -= vaddr;
1201 if (prot & PROT_READ) {
1202 env->tlb_read[is_user][index].address = address;
1203 env->tlb_read[is_user][index].addend = addend;
1204 } else {
1205 env->tlb_read[is_user][index].address = -1;
1206 env->tlb_read[is_user][index].addend = -1;
1207 }
1208 if (prot & PROT_WRITE) {
1209 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1210 /* ROM: access is ignored (same as unassigned) */
1211 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1212 env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
1213 } else if (first_tb) {
1214 /* if code is present, we use a specific memory
1215 handler. It works only for physical memory access */
1216 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1217 env->tlb_write[is_user][index].addend = addend - (unsigned long)phys_ram_base;
1218 } else {
1219 env->tlb_write[is_user][index].address = address;
1220 env->tlb_write[is_user][index].addend = addend;
1221 }
1222 } else {
1223 env->tlb_write[is_user][index].address = -1;
1224 env->tlb_write[is_user][index].addend = -1;
1225 }
1226 }
1227#if !defined(CONFIG_SOFTMMU)
1228 else {
1229 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1230 /* IO access: no mapping is done as it will be handled by the
1231 soft MMU */
1232 if (!(env->hflags & HF_SOFTMMU_MASK))
1233 ret = 2;
1234 } else {
1235 void *map_addr;
1236 if (prot & PROT_WRITE) {
1237 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM || first_tb) {
 1238 /* ROM: we behave as if it contained code */
1239 /* if code is present, we only map as read only and save the
1240 original mapping */
1241 VirtPageDesc *vp;
1242
1243 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1244 vp->phys_addr = pd;
1245 vp->prot = prot;
1246 vp->valid_tag = virt_valid_tag;
1247 prot &= ~PAGE_WRITE;
1248 }
1249 }
1250 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1251 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1252 if (map_addr == MAP_FAILED) {
 1253 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1254 paddr, vaddr);
1255 }
1256 }
1257 }
1258#endif
1259 return ret;
1260}
1261
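/*
 * Illustrative sketch, not part of the original file: how a softmmu load
 * consumes the entry installed above.  For a plain RAM page the entry
 * stores the page-aligned virtual address in 'address' and
 * (host pointer - virtual address) in 'addend', so a hit turns the guest
 * address into a host pointer with a single add.  The function name and
 * the slow-path helper are hypothetical, and the unaligned-access handling
 * of the real fast path (see softmmu_template.h, included at the end of
 * this file) is ignored here.
 */
#if 0
static uint32_t example_ldl(CPUState *env, uint32_t vaddr, int is_user)
{
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env->tlb_read[is_user][index];

    /* any I/O, code or invalid flag bits stored in 'address' make this
       comparison fail and force the slow path */
    if ((vaddr & TARGET_PAGE_MASK) == e->address)
        return ldl_raw((void *)(vaddr + e->addend));
    return example_slow_load(env, vaddr, is_user);  /* refill / io_mem_read */
}
#endif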
1262/* called from signal handler: invalidate the code and unprotect the
 1263 page. Return TRUE if the fault was successfully handled. */
1264int page_unprotect(unsigned long addr)
1265{
1266#if !defined(CONFIG_SOFTMMU)
1267 VirtPageDesc *vp;
1268
1269#if defined(DEBUG_TLB)
1270 printf("page_unprotect: addr=0x%08x\n", addr);
1271#endif
1272 addr &= TARGET_PAGE_MASK;
1273 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1274 if (!vp)
1275 return 0;
1276 /* NOTE: in this case, validate_tag is _not_ tested as it
1277 validates only the code TLB */
1278 if (vp->valid_tag != virt_valid_tag)
1279 return 0;
1280 if (!(vp->prot & PAGE_WRITE))
1281 return 0;
1282#if defined(DEBUG_TLB)
1283 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1284 addr, vp->phys_addr, vp->prot);
1285#endif
1286 tb_invalidate_phys_page(vp->phys_addr);
1287 mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot);
1288 return 1;
1289#else
1290 return 0;
1291#endif
1292}
1293
1294#else
1295
1296void tlb_flush(CPUState *env)
1297{
1298}
1299
1300void tlb_flush_page(CPUState *env, uint32_t addr)
1301{
1302}
1303
1304void tlb_flush_page_write(CPUState *env, uint32_t addr)
1305{
1306}
1307
1308int tlb_set_page(CPUState *env, uint32_t vaddr, uint32_t paddr, int prot,
1309 int is_user, int is_softmmu)
1310{
1311 return 0;
1312}
0124311e 1313
1314/* dump memory mappings */
1315void page_dump(FILE *f)
33417e70 1316{
1317 unsigned long start, end;
1318 int i, j, prot, prot1;
1319 PageDesc *p;
33417e70 1320
1321 fprintf(f, "%-8s %-8s %-8s %s\n",
1322 "start", "end", "size", "prot");
1323 start = -1;
1324 end = -1;
1325 prot = 0;
1326 for(i = 0; i <= L1_SIZE; i++) {
1327 if (i < L1_SIZE)
1328 p = l1_map[i];
1329 else
1330 p = NULL;
1331 for(j = 0;j < L2_SIZE; j++) {
1332 if (!p)
1333 prot1 = 0;
1334 else
1335 prot1 = p[j].flags;
1336 if (prot1 != prot) {
1337 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1338 if (start != -1) {
1339 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1340 start, end, end - start,
1341 prot & PAGE_READ ? 'r' : '-',
1342 prot & PAGE_WRITE ? 'w' : '-',
1343 prot & PAGE_EXEC ? 'x' : '-');
1344 }
1345 if (prot1 != 0)
1346 start = end;
1347 else
1348 start = -1;
1349 prot = prot1;
1350 }
1351 if (!p)
1352 break;
1353 }
33417e70 1354 }
1355}
1356
9fa3e853 1357int page_get_flags(unsigned long address)
33417e70 1358{
1359 PageDesc *p;
1360
1361 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 1362 if (!p)
1363 return 0;
1364 return p->flags;
1365}
1366
1367/* modify the flags of a page and invalidate the code if
 1368 necessary. The flag PAGE_WRITE_ORG is set automatically
1369 depending on PAGE_WRITE */
1370void page_set_flags(unsigned long start, unsigned long end, int flags)
1371{
1372 PageDesc *p;
1373 unsigned long addr;
1374
1375 start = start & TARGET_PAGE_MASK;
1376 end = TARGET_PAGE_ALIGN(end);
1377 if (flags & PAGE_WRITE)
1378 flags |= PAGE_WRITE_ORG;
1379 spin_lock(&tb_lock);
1380 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1381 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1382 /* if the write protection is set, then we invalidate the code
1383 inside */
1384 if (!(p->flags & PAGE_WRITE) &&
1385 (flags & PAGE_WRITE) &&
1386 p->first_tb) {
1387 tb_invalidate_phys_page(addr);
1388 }
1389 p->flags = flags;
1390 }
1391 spin_unlock(&tb_lock);
1392}
1393
1394/* called from signal handler: invalidate the code and unprotect the
 1395 page. Return TRUE if the fault was successfully handled. */
1396int page_unprotect(unsigned long address)
1397{
1398 unsigned int page_index, prot, pindex;
1399 PageDesc *p, *p1;
1400 unsigned long host_start, host_end, addr;
1401
1402 host_start = address & host_page_mask;
1403 page_index = host_start >> TARGET_PAGE_BITS;
1404 p1 = page_find(page_index);
1405 if (!p1)
1406 return 0;
1407 host_end = host_start + host_page_size;
1408 p = p1;
1409 prot = 0;
1410 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1411 prot |= p->flags;
1412 p++;
1413 }
1414 /* if the page was really writable, then we change its
1415 protection back to writable */
1416 if (prot & PAGE_WRITE_ORG) {
1417 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1418 if (!(p1[pindex].flags & PAGE_WRITE)) {
1419 mprotect((void *)host_start, host_page_size,
1420 (prot & PAGE_BITS) | PAGE_WRITE);
1421 p1[pindex].flags |= PAGE_WRITE;
1422 /* and since the content will be modified, we must invalidate
1423 the corresponding translated code. */
1424 tb_invalidate_phys_page(address);
1425#ifdef DEBUG_TB_CHECK
1426 tb_invalidate_check(address);
1427#endif
1428 return 1;
1429 }
1430 }
1431 return 0;
1432}
1433
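/*
 * Illustrative sketch, not part of the original file: in user-only mode the
 * pages holding translated code are kept read-only with mprotect(), so a
 * guest write to them raises SIGSEGV on the host.  A simplified,
 * hypothetical host signal handler would recover the fault address, let
 * page_unprotect() restore write access and invalidate the affected
 * translations, and then return so the faulting store is retried:
 */
#if 0
static void example_segv_handler(int sig, siginfo_t *info, void *puc)
{
    unsigned long addr = (unsigned long)info->si_addr;

    if (page_unprotect(addr))
        return;     /* write access restored, the store will be retried */
    abort();        /* genuine fault: not one of our protected pages */
}
#endif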
1434/* call this function when system calls directly modify a memory area */
1435void page_unprotect_range(uint8_t *data, unsigned long data_size)
1436{
1437 unsigned long start, end, addr;
1438
1439 start = (unsigned long)data;
1440 end = start + data_size;
1441 start &= TARGET_PAGE_MASK;
1442 end = TARGET_PAGE_ALIGN(end);
1443 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1444 page_unprotect(addr);
1445 }
1446}
1447
1448#endif /* defined(CONFIG_USER_ONLY) */
1449
1450/* register physical memory. 'size' must be a multiple of the target
1451 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1452 io memory page */
1453void cpu_register_physical_memory(unsigned long start_addr, unsigned long size,
1454 long phys_offset)
1455{
1456 unsigned long addr, end_addr;
9fa3e853 1457 PageDesc *p;
1458
1459 end_addr = start_addr + size;
1460 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
1461 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1462 p->phys_offset = phys_offset;
1463 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1464 phys_offset += TARGET_PAGE_SIZE;
1465 }
1466}
1467
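/*
 * Note added for clarity, not in the original source: the phys_offset
 * stored above is either the page-aligned offset of the page inside
 * phys_ram_base (RAM case) or a descriptor whose low bits carry the
 * io_index returned by cpu_register_io_memory().  A simplified decode of a
 * PageDesc entry (the ROM and code cases are handled separately in the
 * real access paths):
 */
#if 0
unsigned long pd = p->phys_offset;          /* given some PageDesc *p */
if ((pd & ~TARGET_PAGE_MASK) == 0) {
    /* plain RAM: host pointer to the start of the page */
    uint8_t *host = phys_ram_base + (pd & TARGET_PAGE_MASK);
} else {
    /* I/O style page: recover the handler slot */
    int io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
}
#endif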
1468static uint32_t unassigned_mem_readb(uint32_t addr)
1469{
1470 return 0;
1471}
1472
1473static void unassigned_mem_writeb(uint32_t addr, uint32_t val)
1474{
1475}
1476
1477static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1478 unassigned_mem_readb,
1479 unassigned_mem_readb,
1480 unassigned_mem_readb,
1481};
1482
1483static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1484 unassigned_mem_writeb,
1485 unassigned_mem_writeb,
1486 unassigned_mem_writeb,
1487};
1488
 1489/* self-modifying code support in soft mmu mode: writes to a page
 1490 containing code are routed to these functions */
1491
1492static void code_mem_writeb(uint32_t addr, uint32_t val)
1493{
1494#if !defined(CONFIG_USER_ONLY)
1495 tb_invalidate_phys_page_fast(addr, 1);
1496#endif
1497 stb_raw(phys_ram_base + addr, val);
1498}
1499
1500static void code_mem_writew(uint32_t addr, uint32_t val)
1501{
1502#if !defined(CONFIG_USER_ONLY)
1503 tb_invalidate_phys_page_fast(addr, 2);
1504#endif
1505 stw_raw(phys_ram_base + addr, val);
1506}
1507
1508static void code_mem_writel(uint32_t addr, uint32_t val)
1509{
1510#if !defined(CONFIG_USER_ONLY)
1511 tb_invalidate_phys_page_fast(addr, 4);
1512#endif
1513 stl_raw(phys_ram_base + addr, val);
1514}
1515
1516static CPUReadMemoryFunc *code_mem_read[3] = {
1517 NULL, /* never used */
1518 NULL, /* never used */
1519 NULL, /* never used */
1520};
1521
1522static CPUWriteMemoryFunc *code_mem_write[3] = {
1523 code_mem_writeb,
1524 code_mem_writew,
1525 code_mem_writel,
1526};
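/*
 * Note added for clarity, not in the original source: pages that contain
 * translated code get their write TLB entries tagged with IO_MEM_CODE (see
 * tlb_set_page() and tlb_protect_code() above), so every guest store to
 * such a page is routed through the three handlers above: the affected
 * translations are discarded first (bitmap check, then
 * tb_invalidate_phys_page_range()), and only then is the byte/word/dword
 * actually written into phys_ram_base.
 */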
1527
1528static void io_mem_init(void)
1529{
1530 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1531 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1532 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1533 io_mem_nb = 4;
1534}
1535
1536/* mem_read and mem_write are arrays of functions containing the
1537 function to access byte (index 0), word (index 1) and dword (index
1538 2). All functions must be supplied. If io_index is non zero, the
1539 corresponding io zone is modified. If it is zero, a new io zone is
1540 allocated. The return value can be used with
1541 cpu_register_physical_memory(). (-1) is returned if error. */
1542int cpu_register_io_memory(int io_index,
1543 CPUReadMemoryFunc **mem_read,
1544 CPUWriteMemoryFunc **mem_write)
1545{
1546 int i;
1547
1548 if (io_index <= 0) {
1549 if (io_index >= IO_MEM_NB_ENTRIES)
1550 return -1;
1551 io_index = io_mem_nb++;
1552 } else {
1553 if (io_index >= IO_MEM_NB_ENTRIES)
1554 return -1;
1555 }
1556
1557 for(i = 0;i < 3; i++) {
1558 io_mem_read[io_index][i] = mem_read[i];
1559 io_mem_write[io_index][i] = mem_write[i];
1560 }
1561 return io_index << IO_MEM_SHIFT;
1562}
61382a50 1563
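/*
 * Usage sketch added for clarity, not in the original source: a device
 * supplies one read and one write callback per access size (byte, word,
 * dword) and registers them here; the returned value is then passed to
 * cpu_register_physical_memory() as the phys_offset of the guest physical
 * pages it should cover.  All names and addresses below are hypothetical.
 */
#if 0
static uint32_t example_dev_readb(uint32_t addr) { return 0; }
static uint32_t example_dev_readw(uint32_t addr) { return 0; }
static uint32_t example_dev_readl(uint32_t addr) { return 0; }
static void example_dev_writeb(uint32_t addr, uint32_t val) { }
static void example_dev_writew(uint32_t addr, uint32_t val) { }
static void example_dev_writel(uint32_t addr, uint32_t val) { }

static CPUReadMemoryFunc *example_dev_read[3] = {
    example_dev_readb, example_dev_readw, example_dev_readl,
};
static CPUWriteMemoryFunc *example_dev_write[3] = {
    example_dev_writeb, example_dev_writew, example_dev_writel,
};

static void example_dev_init(void)
{
    /* io_index 0 asks for a fresh slot; the return value already has the
       index shifted into place by IO_MEM_SHIFT */
    int iomem = cpu_register_io_memory(0, example_dev_read, example_dev_write);
    cpu_register_physical_memory(0xf1000000, 0x1000, iomem);
}
#endif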
1564/* physical memory access (slow version, mainly for debug) */
1565#if defined(CONFIG_USER_ONLY)
1566void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
1567 int len, int is_write)
1568{
1569 int l, flags;
1570 target_ulong page;
1571
1572 while (len > 0) {
1573 page = addr & TARGET_PAGE_MASK;
1574 l = (page + TARGET_PAGE_SIZE) - addr;
1575 if (l > len)
1576 l = len;
1577 flags = page_get_flags(page);
1578 if (!(flags & PAGE_VALID))
1579 return;
1580 if (is_write) {
1581 if (!(flags & PAGE_WRITE))
1582 return;
1583 memcpy((uint8_t *)addr, buf, len);
1584 } else {
1585 if (!(flags & PAGE_READ))
1586 return;
1587 memcpy(buf, (uint8_t *)addr, len);
1588 }
1589 len -= l;
1590 buf += l;
1591 addr += l;
1592 }
1593}
1594#else
1595void cpu_physical_memory_rw(CPUState *env, uint8_t *buf, target_ulong addr,
1596 int len, int is_write)
1597{
1598 int l, io_index;
1599 uint8_t *ptr;
1600 uint32_t val;
1601 target_ulong page, pd;
1602 PageDesc *p;
1603
1604 while (len > 0) {
1605 page = addr & TARGET_PAGE_MASK;
1606 l = (page + TARGET_PAGE_SIZE) - addr;
1607 if (l > len)
1608 l = len;
1609 p = page_find(page >> TARGET_PAGE_BITS);
1610 if (!p) {
1611 pd = IO_MEM_UNASSIGNED;
1612 } else {
1613 pd = p->phys_offset;
1614 }
1615
1616 if (is_write) {
1617 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1618 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1619 if (l >= 4 && ((addr & 3) == 0)) {
 1620 /* 32 bit write access */
1621 val = ldl_raw(buf);
1622 io_mem_write[io_index][2](addr, val);
1623 l = 4;
1624 } else if (l >= 2 && ((addr & 1) == 0)) {
 1625 /* 16 bit write access */
1626 val = lduw_raw(buf);
1627 io_mem_write[io_index][1](addr, val);
1628 l = 2;
1629 } else {
1630 /* 8 bit access */
1631 val = ldub_raw(buf);
1632 io_mem_write[io_index][0](addr, val);
1633 l = 1;
1634 }
1635 } else {
1636 /* RAM case */
1637 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1638 (addr & ~TARGET_PAGE_MASK);
1639 memcpy(ptr, buf, l);
1640 }
1641 } else {
1642 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1643 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
1644 /* I/O case */
1645 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1646 if (l >= 4 && ((addr & 3) == 0)) {
1647 /* 32 bit read access */
1648 val = io_mem_read[io_index][2](addr);
1649 stl_raw(buf, val);
1650 l = 4;
1651 } else if (l >= 2 && ((addr & 1) == 0)) {
1652 /* 16 bit read access */
1653 val = io_mem_read[io_index][1](addr);
1654 stw_raw(buf, val);
1655 l = 2;
1656 } else {
1657 /* 8 bit access */
1658 val = io_mem_read[io_index][0](addr);
1659 stb_raw(buf, val);
1660 l = 1;
1661 }
1662 } else {
1663 /* RAM case */
1664 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
1665 (addr & ~TARGET_PAGE_MASK);
1666 memcpy(buf, ptr, l);
1667 }
1668 }
1669 len -= l;
1670 buf += l;
1671 addr += l;
1672 }
1673}
1674#endif
1675
1676/* virtual memory access for debug */
1677int cpu_memory_rw_debug(CPUState *env,
1678 uint8_t *buf, target_ulong addr, int len, int is_write)
1679{
1680 int l;
1681 target_ulong page, phys_addr;
1682
1683 while (len > 0) {
1684 page = addr & TARGET_PAGE_MASK;
1685 phys_addr = cpu_get_phys_page_debug(env, page);
1686 /* if no physical page mapped, return an error */
1687 if (phys_addr == -1)
1688 return -1;
1689 l = (page + TARGET_PAGE_SIZE) - addr;
1690 if (l > len)
1691 l = len;
1692 cpu_physical_memory_rw(env, buf,
1693 phys_addr + (addr & ~TARGET_PAGE_MASK), l,
1694 is_write);
1695 len -= l;
1696 buf += l;
1697 addr += l;
1698 }
1699 return 0;
1700}
1701
1702#if !defined(CONFIG_USER_ONLY)
1703
1704#define MMUSUFFIX _cmmu
1705#define GETPC() NULL
1706#define env cpu_single_env
1707
1708#define SHIFT 0
1709#include "softmmu_template.h"
1710
1711#define SHIFT 1
1712#include "softmmu_template.h"
1713
1714#define SHIFT 2
1715#include "softmmu_template.h"
1716
1717#define SHIFT 3
1718#include "softmmu_template.h"
1719
1720#undef env
1721
1722#endif