/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#if !defined(CONFIG_SOFTMMU)
#include <sys/mman.h>
#endif

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

typedef struct PageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    unsigned long phys_offset;
    /* list of TBs intersecting this physical page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct VirtPageDesc {
    /* physical address of code page. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    target_ulong phys_addr;
    unsigned int valid_tag;
#if !defined(CONFIG_SOFTMMU)
    /* original page access rights. It is valid only if 'valid_tag'
       matches 'virt_valid_tag' */
    unsigned int prot;
#endif
} VirtPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;

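/* the page maps below are two-level tables: the top L1_BITS bits of a
   target page index select a slot in an L1 array of pointers, each of
   which refers to an L2 array of descriptors allocated on demand (see
   page_find_alloc() below) */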
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static VirtPageDesc *l1_virt_map[L1_SIZE];
static unsigned int virt_valid_tag;
#endif

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

static void page_init(void)
{
    /* NOTE: we can always suppose that host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    real_host_page_size = 4096;
#else
    real_host_page_size = getpagesize();
#endif
    if (host_page_size == 0)
        host_page_size = real_host_page_size;
    if (host_page_size < TARGET_PAGE_SIZE)
        host_page_size = TARGET_PAGE_SIZE;
    host_page_bits = 0;
    while ((1 << host_page_bits) < host_page_size)
        host_page_bits++;
    host_page_mask = ~(host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    virt_valid_tag = 1;
#endif
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(CPUState *env, target_ulong addr);
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);

static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
{
    VirtPageDesc **lp, *p;

    lp = &l1_virt_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline VirtPageDesc *virt_page_find(unsigned int index)
{
    VirtPageDesc *p;

    p = l1_virt_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

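/* flush all the virtual page descriptors at once by bumping the global
   generation tag: an entry is only considered valid when its 'valid_tag'
   equals 'virt_valid_tag', so nothing needs to be cleared except on the
   rare wrap-around to zero handled below */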
static void virt_page_flush(void)
{
    int i, j;
    VirtPageDesc *p;

    virt_valid_tag++;

    if (virt_valid_tag == 0) {
        virt_valid_tag = 1;
        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                for(j = 0; j < L2_SIZE; j++)
                    p[j].valid_tag = 0;
            }
        }
    }
}
#else
static void virt_page_flush(void)
{
}
#endif

void cpu_exec_init(void)
{
    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env)
{
    int i;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;
    virt_page_flush();

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
        tb_phys_hash[i] = NULL;
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

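/* a TB can span two physical pages; each page list links it with the
   page index (0 or 1) encoded in the two low bits of the pointer, and
   the value 2 terminates a jump list, which is why the list walkers
   below unpack pointers with '& 3' / '& ~3' */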
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

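/* remove the TB from the virtual pc hash table and break every jump
   chained to or from it; the TB itself is only reclaimed by the next
   tb_flush() */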
static inline void tb_invalidate(TranslationBlock *tb)
{
    unsigned int h, n1;
    TranslationBlock *tb1, *tb2, **ptb;

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_hash_func(tb->pc);
    ptb = &tb_hash[h];
    for(;;) {
        tb1 = *ptb;
        /* NOTE: the TB is not necessarily linked in the hash. It
           indicates that it is not currently used */
        if (tb1 == NULL)
            return;
        if (tb1 == tb) {
            *ptb = tb1->hash_next;
            break;
        }
        ptb = &tb1->hash_next;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
}

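/* in addition to tb_invalidate(), unlink the TB from the physical hash
   table and from the page lists of the (up to two) physical pages it
   covers */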
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    PageDesc *p;
    unsigned int h;
    target_ulong phys_pc;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidate(tb);
}

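/* set bits [start, start + len[ in the bitmap 'tab'; used to mark the
   parts of a page that actually contain translated code */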
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

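/* build the bitmap of code locations in a physical page so that writes
   which do not hit translated code can avoid invalidating any TB (see
   tb_invalidate_phys_page_fast() below) */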
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

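/* generate a new TB for the code at 'pc'; the self-modifying-code
   handlers below call this with CF_SINGLE_INSN to regenerate just the
   instruction doing the memory write */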
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, (unsigned long)pc);
    tb = tb_alloc((unsigned long)pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc((unsigned long)pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
    CPUState *env = cpu_single_env;
#endif
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            tb_phys_invalidate(tb, -1);
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (cpu_single_env->cr[0] & CR0_PE_MASK) {
        printf("modifying code at 0x%x size=%d EIP=%x\n",
               (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
               cpu_single_env->eip);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#ifdef TARGET_HAS_SMC

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & host_page_mask;
        host_end = host_start + host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        target_ulong virt_addr;

        virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
        tlb_protect_code(cpu_single_env, virt_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(unsigned long pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;
#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* link the tb with the other TBs */
void tb_link(TranslationBlock *tb)
{
#if !defined(CONFIG_USER_ONLY)
    {
        VirtPageDesc *vp;
        target_ulong addr;

        /* save the code memory mappings (needed to invalidate the code) */
        addr = tb->pc & TARGET_PAGE_MASK;
        vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
        if (vp->valid_tag == virt_valid_tag &&
            vp->phys_addr != tb->page_addr[0]) {
            printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                   addr, tb->page_addr[0], vp->phys_addr);
        }
#endif
        vp->phys_addr = tb->page_addr[0];
        if (vp->valid_tag != virt_valid_tag) {
            vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
            vp->prot = 0;
#endif
        }

        if (tb->page_addr[1] != -1) {
            addr += TARGET_PAGE_SIZE;
            vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
#ifdef DEBUG_TLB_CHECK
            if (vp->valid_tag == virt_valid_tag &&
                vp->phys_addr != tb->page_addr[1]) {
                printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
                       addr, tb->page_addr[1], vp->phys_addr);
            }
#endif
            vp->phys_addr = tb->page_addr[1];
            if (vp->valid_tag != virt_valid_tag) {
                vp->valid_tag = virt_valid_tag;
#if !defined(CONFIG_SOFTMMU)
                vp->prot = 0;
#endif
            }
        }
    }
#endif

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

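/* unchain 'tb' from the TB its jump slot 'n' points to, then recursively
   unchain that TB too; cpu_interrupt() relies on this so that a CPU
   looping inside chained TBs cannot miss an interrupt */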
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
            (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
    env->nb_breakpoints--;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_I386) || defined(TARGET_PPC)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    virt_page_flush();
    for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
        tb_hash[i] = NULL;

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i, n;
    VirtPageDesc *vp;
    PageDesc *p;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: 0x%08x\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    /* remove from the virtual pc hash table all the TBs at this
       virtual address */

    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (vp && vp->valid_tag == virt_valid_tag) {
        p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
        if (p) {
            /* we remove all the links to the TBs in this virtual page */
            tb = p->first_tb;
            while (tb != NULL) {
                n = (long)tb & 3;
                tb = (TranslationBlock *)((long)tb & ~3);
                if ((tb->pc & TARGET_PAGE_MASK) == addr ||
                    ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
                    tb_invalidate(tb);
                }
                tb = tb->page_next[n];
            }
        }
        vp->valid_tag = 0;
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
}

static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
        (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
    }
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(CPUState *env, target_ulong addr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_protect_code1(&env->tlb_write[0][i], addr);
    tlb_protect_code1(&env->tlb_write[1][i], addr);
#if !defined(CONFIG_SOFTMMU)
    /* NOTE: as we generated the code for this page, it is already at
       least readable */
    if (addr < MMAP_AREA_END)
        mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
#endif
}

static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
                                       unsigned long phys_addr)
{
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
        ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
        tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
    }
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
{
    int i;

    phys_addr &= TARGET_PAGE_MASK;
    phys_addr += (long)phys_ram_base;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
    tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

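/* clear the dirty state of all RAM pages in [start, end[ and redirect
   the corresponding TLB write entries to IO_MEM_NOTDIRTY, so that the
   next store to each page takes the slow path and sets the dirty bit
   again */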
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);

    env = cpu_single_env;
    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
    CPUState *env = cpu_single_env;
    int i;

    phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PageDesc *p;
    unsigned long pd;
    TranslationBlock *first_tb;
    unsigned int index;
    target_ulong address;
    unsigned long addend;
    int ret;

    p = page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
        first_tb = NULL;
    } else {
        pd = p->phys_offset;
        first_tb = p->first_tb;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
           vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else
                /* XXX: the PowerPC code seems not ready to handle
                   self modifying code with DCBI */
#if defined(TARGET_HAS_SMC) || 1
            if (first_tb) {
                /* if code is present, we use a specific memory
                   handler. It works only for physical memory access */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
                env->tlb_write[is_user][index].addend = addend;
            } else
#endif
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    unsigned long addr, end_addr;
    PageDesc *p;

    end_addr = start_addr + size;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

/* self modifying code support in soft mmu mode: writing to a page
   containing code comes to these functions */

static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 1);
#endif
    stb_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 2);
#endif
    stw_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    unsigned long phys_addr;

    phys_addr = addr - (unsigned long)phys_ram_base;
#if !defined(CONFIG_USER_ONLY)
    tb_invalidate_phys_page_fast(phys_addr, 4);
#endif
    stl_raw((uint8_t *)addr, val);
    phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
}

static CPUReadMemoryFunc *code_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *code_mem_write[3] = {
    code_mem_writeb,
    code_mem_writew,
    code_mem_writel,
};

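/* writes to RAM pages whose dirty bit is clear land here: the store is
   performed, then tlb_set_dirty() switches the TLB entry back to a
   direct RAM mapping */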
static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
{
    stb_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
{
    stw_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
{
    stl_raw((uint8_t *)addr, val);
    tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
}

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
    cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
    io_mem_nb = 5;

    /* alloc dirty bits array */
    phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write)
{
    int i;

    if (io_index <= 0) {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    return io_index << IO_MEM_SHIFT;
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != 0) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_raw(buf);
                    io_mem_write[io_index][2](addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_raw(buf);
                    io_mem_write[io_index][1](addr, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = ldub_raw(buf);
                    io_mem_write[io_index][0](addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](addr);
                    stl_raw(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](addr);
                    stw_raw(buf, val);
                    l = 2;
                } else {
                    /* 8 bit access */
                    val = io_mem_read[io_index][0](addr);
                    stb_raw(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#if !defined(CONFIG_USER_ONLY)

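/* instantiate the softmmu helper templates for the four access sizes;
   the _cmmu suffix marks the code-access variants used when fetching
   target instructions, so GETPC() can be NULL as no CPU state needs to
   be restored */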
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif