/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
} PhysPageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

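/* The page maps below are two-level: the high bits of a target page index
   select an entry in l1_map/l1_phys_map/l1_virt_map, and each L1 entry
   points to an array of L2_SIZE descriptors that is allocated on first use
   (see page_find_alloc() and friends below). */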
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc *l1_phys_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find_alloc(unsigned int index)
{
    PhysPageDesc **lp, *p;

    lp = &l1_phys_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(unsigned int index)
{
    PhysPageDesc *p;

    p = l1_phys_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

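/* virt_valid_tag lets virt_page_flush() invalidate every VirtPageDesc in
   O(1): bumping the global tag makes all existing entries stale, since an
   entry is only considered valid when its valid_tag matches.  Only when
   the counter wraps to zero are the entries cleared explicitly. */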
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

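/* TBs are linked per physical page through page_next[]; the low 2 bits of
   each list pointer encode which of the TB's (at most two) pages the link
   belongs to, e.g. p->first_tb = (TranslationBlock *)((long)tb | n), so
   pointers must be masked with ~3 before being dereferenced. */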
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

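/* Each TB also keeps a circular list of the TBs that jump to it: jmp_first
   is the head, the low 2 bits of each pointer give the jump slot (0 or 1)
   in the referring TB, and the value 2 marks the end of the list (the TB
   itself). */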
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

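/* set_bits() marks bits [start, start + len) in the byte array 'tab'.  It
   is used to build the per-page code bitmap (one bit per guest byte of the
   page), so that later writes can quickly tell whether they touch
   translated code. */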
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

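/* tb_gen_code() retranslates a block at (pc, cs_base, flags).  It is only
   built with TARGET_HAS_PRECISE_SMC and is used by the self-modifying-code
   handlers below to regenerate a single-instruction TB (CF_SINGLE_INSN)
   for the instruction that performed the write. */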
#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

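/* Pages that contain translated code are write-protected so that
   self-modifying code can be detected: under CONFIG_USER_ONLY the host
   page is mprotect()ed read-only, otherwise the softmmu TLB redirects
   writes to the IO_MEM_CODE handlers (see tlb_protect_code()). */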
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

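/* tbs[] is filled in allocation order and the code buffer grows
   monotonically, so the array is sorted by tc_ptr; a binary search can
   therefore map a host PC inside the code buffer back to its TB. */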
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TB at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

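/* Writes to pages that contain translated code are trapped by retargeting
   the write TLB entry to the IO_MEM_CODE handlers; those handlers
   invalidate the affected TBs before performing the store (see
   code_mem_write* below). */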
static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   checked for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

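/* Dirty memory tracking: phys_ram_dirty holds one byte per target page.
   Clearing a range below also rewrites matching RAM write TLB entries to
   IO_MEM_NOTDIRTY, so the next store to such a page goes through the slow
   path and sets the dirty flag again via tlb_set_dirty(). */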
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

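/* In a PhysPageDesc, phys_offset encodes both the RAM offset and the I/O
   index: if (pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM the page is RAM/ROM
   backed by phys_ram_base, otherwise the low bits select a registered I/O
   memory region (see cpu_register_io_memory()). */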
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    first_tb = NULL;
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        PageDesc *p1;
        pd = p->phys_offset;
        if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
            /* NOTE: we also allocate the page at this stage */
            p1 = page_find_alloc(pd >> TARGET_PAGE_BITS);
            first_tb = p1->first_tb;
        }
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

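/* In the handlers below, 'addr' is already a host pointer into the guest
   RAM block (phys_ram_base + ram offset); the RAM offset is recovered by
   subtracting phys_ram_base.  Any TBs overlapping the store are
   invalidated before the raw store is performed and the page is marked
   dirty. */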
9fa3e853
FB
1827/* self modifying code support in soft mmu mode : writing to a page
1828 containing code comes to these functions */
1829
a4193c8a 1830static void code_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1831{
1ccde1cb
FB
1832 unsigned long phys_addr;
1833
274da6b2 1834 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1835#if !defined(CONFIG_USER_ONLY)
d720b93d 1836 tb_invalidate_phys_page_fast(phys_addr, 1);
9fa3e853 1837#endif
1838 stb_raw((uint8_t *)addr, val);
1839 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1840}
1841
a4193c8a 1842static void code_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1843{
1844 unsigned long phys_addr;
1845
274da6b2 1846 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1847#if !defined(CONFIG_USER_ONLY)
d720b93d 1848 tb_invalidate_phys_page_fast(phys_addr, 2);
9fa3e853 1849#endif
1850 stw_raw((uint8_t *)addr, val);
1851 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1852}
1853
a4193c8a 1854static void code_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1855{
1856 unsigned long phys_addr;
1857
274da6b2 1858 phys_addr = addr - (unsigned long)phys_ram_base;
9fa3e853 1859#if !defined(CONFIG_USER_ONLY)
d720b93d 1860 tb_invalidate_phys_page_fast(phys_addr, 4);
9fa3e853 1861#endif
1862 stl_raw((uint8_t *)addr, val);
1863 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1864}
1865
1866static CPUReadMemoryFunc *code_mem_read[3] = {
1867 NULL, /* never used */
1868 NULL, /* never used */
1869 NULL, /* never used */
1870};
1871
1872static CPUWriteMemoryFunc *code_mem_write[3] = {
1873 code_mem_writeb,
1874 code_mem_writew,
1875 code_mem_writel,
1876};
33417e70 1877
a4193c8a 1878static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
1879{
1880 stb_raw((uint8_t *)addr, val);
d720b93d 1881 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1882}
1883
a4193c8a 1884static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
1885{
1886 stw_raw((uint8_t *)addr, val);
d720b93d 1887 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1888}
1889
a4193c8a 1890static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
1891{
1892 stl_raw((uint8_t *)addr, val);
d720b93d 1893 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1894}
1895
1896static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1897 notdirty_mem_writeb,
1898 notdirty_mem_writew,
1899 notdirty_mem_writel,
1900};
1901
1902static void io_mem_init(void)
1903{
1904 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write, NULL);
1905 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
1906 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write, NULL);
1907 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write, NULL);
1908 io_mem_nb = 5;
1909
1910 /* alloc dirty bits array */
59817ccb 1911 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
1912}
1913
 1914/* mem_read and mem_write are arrays of functions containing the
 1915 functions to access the region as bytes (index 0), words (index 1)
 1916 and dwords (index 2). All functions must be supplied. If io_index
 1917 is non-zero, the corresponding io zone is modified. If it is zero, a
 1918 new io zone is allocated. The return value can be used with
 1919 cpu_register_physical_memory(). (-1) is returned on error. */
1920int cpu_register_io_memory(int io_index,
1921 CPUReadMemoryFunc **mem_read,
1922 CPUWriteMemoryFunc **mem_write,
1923 void *opaque)
1924{
1925 int i;
1926
 1927 if (io_index <= 0) {
 1928 if (io_mem_nb >= IO_MEM_NB_ENTRIES)
 1929 return -1;
1930 io_index = io_mem_nb++;
1931 } else {
1932 if (io_index >= IO_MEM_NB_ENTRIES)
1933 return -1;
1934 }
1935
1936 for(i = 0;i < 3; i++) {
1937 io_mem_read[io_index][i] = mem_read[i];
1938 io_mem_write[io_index][i] = mem_write[i];
1939 }
a4193c8a 1940 io_mem_opaque[io_index] = opaque;
1941 return io_index << IO_MEM_SHIFT;
1942}
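
/* Rough usage sketch (every 'mydev_*' name is hypothetical): a device
   model supplies one handler per access size, registers them together
   with an opaque state pointer, and maps the returned token at a guest
   physical address with cpu_register_physical_memory(). */
static uint32_t mydev_io_read(void *opaque, target_phys_addr_t addr)
{
    return 0;   /* a real device would decode 'addr' against its registers */
}

static void mydev_io_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* a real device would latch 'val' into the register selected by 'addr' */
}

static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_io_read, mydev_io_read, mydev_io_read,
};

static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_io_write, mydev_io_write, mydev_io_write,
};

static void mydev_init(target_phys_addr_t base, void *dev_state)
{
    int io;

    io = cpu_register_io_memory(0, mydev_read, mydev_write, dev_state);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}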
61382a50 1943
1944/* physical memory access (slow version, mainly for debug) */
1945#if defined(CONFIG_USER_ONLY)
2e12669a 1946void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1947 int len, int is_write)
1948{
1949 int l, flags;
1950 target_ulong page;
1951
1952 while (len > 0) {
1953 page = addr & TARGET_PAGE_MASK;
1954 l = (page + TARGET_PAGE_SIZE) - addr;
1955 if (l > len)
1956 l = len;
1957 flags = page_get_flags(page);
1958 if (!(flags & PAGE_VALID))
1959 return;
1960 if (is_write) {
1961 if (!(flags & PAGE_WRITE))
1962 return;
 1963 memcpy((uint8_t *)addr, buf, l);
1964 } else {
1965 if (!(flags & PAGE_READ))
1966 return;
 1967 memcpy(buf, (uint8_t *)addr, l);
1968 }
1969 len -= l;
1970 buf += l;
1971 addr += l;
1972 }
1973}
1974#else
2e12669a 1975void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1976 int len, int is_write)
1977{
1978 int l, io_index;
1979 uint8_t *ptr;
1980 uint32_t val;
1981 target_phys_addr_t page;
1982 unsigned long pd;
92e873b9 1983 PhysPageDesc *p;
1984
1985 while (len > 0) {
1986 page = addr & TARGET_PAGE_MASK;
1987 l = (page + TARGET_PAGE_SIZE) - addr;
1988 if (l > len)
1989 l = len;
92e873b9 1990 p = phys_page_find(page >> TARGET_PAGE_BITS);
1991 if (!p) {
1992 pd = IO_MEM_UNASSIGNED;
1993 } else {
1994 pd = p->phys_offset;
1995 }
1996
1997 if (is_write) {
1998 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1999 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2000 if (l >= 4 && ((addr & 3) == 0)) {
 2001 /* 32 bit write access */
2002 val = ldl_raw(buf);
a4193c8a 2003 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2004 l = 4;
2005 } else if (l >= 2 && ((addr & 1) == 0)) {
 2006 /* 16 bit write access */
2007 val = lduw_raw(buf);
a4193c8a 2008 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2009 l = 2;
2010 } else {
2011 /* 8 bit access */
2012 val = ldub_raw(buf);
a4193c8a 2013 io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2014 l = 1;
2015 }
2016 } else {
2017 unsigned long addr1;
2018 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2019 /* RAM case */
b448f2f3 2020 ptr = phys_ram_base + addr1;
13eb76e0 2021 memcpy(ptr, buf, l);
2022 /* invalidate code */
2023 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2024 /* set dirty bit */
 2025 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] = 1;
2026 }
2027 } else {
2028 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2029 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
2030 /* I/O case */
2031 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2032 if (l >= 4 && ((addr & 3) == 0)) {
2033 /* 32 bit read access */
a4193c8a 2034 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2035 stl_raw(buf, val);
2036 l = 4;
2037 } else if (l >= 2 && ((addr & 1) == 0)) {
2038 /* 16 bit read access */
a4193c8a 2039 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2040 stw_raw(buf, val);
2041 l = 2;
2042 } else {
2043 /* 8 bit access */
a4193c8a 2044 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2045 stb_raw(buf, val);
2046 l = 1;
2047 }
2048 } else {
2049 /* RAM case */
2050 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2051 (addr & ~TARGET_PAGE_MASK);
2052 memcpy(buf, ptr, l);
2053 }
2054 }
2055 len -= l;
2056 buf += l;
2057 addr += l;
2058 }
2059}
2060#endif
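
/* Hedged sketch (the descriptor layout and addresses are invented): a
   DMA-capable device model would use cpu_physical_memory_rw() to pull a
   descriptor out of guest memory and to push a completion status back,
   letting the function sort out the RAM vs. MMIO cases per page. */
static void example_dma_transfer(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];
    uint8_t status = 1;

    cpu_physical_memory_rw(desc_addr, desc, sizeof(desc), 0);   /* read  */
    cpu_physical_memory_rw(desc_addr + 16, &status, 1, 1);      /* write */
}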
2061
2062/* virtual memory access for debug */
2063int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2064 uint8_t *buf, int len, int is_write)
2065{
2066 int l;
2067 target_ulong page, phys_addr;
2068
2069 while (len > 0) {
2070 page = addr & TARGET_PAGE_MASK;
2071 phys_addr = cpu_get_phys_page_debug(env, page);
2072 /* if no physical page mapped, return an error */
2073 if (phys_addr == -1)
2074 return -1;
2075 l = (page + TARGET_PAGE_SIZE) - addr;
2076 if (l > len)
2077 l = len;
2078 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2079 buf, l, is_write);
2080 len -= l;
2081 buf += l;
2082 addr += l;
2083 }
2084 return 0;
2085}
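
/* Illustrative sketch (the helper name is hypothetical): a gdb-style stub
   reads guest *virtual* memory through cpu_memory_rw_debug(), which
   translates each page with cpu_get_phys_page_debug() and then goes
   through cpu_physical_memory_rw() on the resulting physical address. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);   /* 0 = read */
}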
2086
2087#if !defined(CONFIG_USER_ONLY)
2088
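/* The repeated includes below instantiate the softmmu load/store helpers
   once per access size (SHIFT is the log2 of the size: byte, word,
   longword, quadword).  The _cmmu suffix and the NULL GETPC() mark these
   as the code-access variants used when the translator itself fetches
   guest instructions; they use the global cpu_single_env as 'env'. */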
2089#define MMUSUFFIX _cmmu
2090#define GETPC() NULL
2091#define env cpu_single_env
2092
2093#define SHIFT 0
2094#include "softmmu_template.h"
2095
2096#define SHIFT 1
2097#include "softmmu_template.h"
2098
2099#define SHIFT 2
2100#include "softmmu_template.h"
2101
2102#define SHIFT 3
2103#include "softmmu_template.h"
2104
2105#undef env
2106
2107#endif