1 /*
2 * virtual page mapping and translated block handling
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "config.h"
21 #include <stdlib.h>
22 #include <stdio.h>
23 #include <stdarg.h>
24 #include <string.h>
25 #include <errno.h>
26 #include <unistd.h>
27 #include <inttypes.h>
28 #if !defined(CONFIG_SOFTMMU)
29 #include <sys/mman.h>
30 #endif
31
32 #include "cpu.h"
33 #include "exec-all.h"
34
35 //#define DEBUG_TB_INVALIDATE
36 //#define DEBUG_FLUSH
37 //#define DEBUG_TLB
38
39 /* make various TB consistency checks */
40 //#define DEBUG_TB_CHECK
41 //#define DEBUG_TLB_CHECK
42
43 /* threshold to flush the translated code buffer */
44 #define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)
45
46 #define SMC_BITMAP_USE_THRESHOLD 10
47
48 #define MMAP_AREA_START 0x00000000
49 #define MMAP_AREA_END 0xa8000000
50
51 TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
52 TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
53 TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
54 int nb_tbs;
55 /* any access to the tbs or the page table must use this lock */
56 spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
57
58 uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
59 uint8_t *code_gen_ptr;
60
61 int phys_ram_size;
62 int phys_ram_fd;
63 uint8_t *phys_ram_base;
64 uint8_t *phys_ram_dirty;
65
66 typedef struct PageDesc {
67 /* offset in host memory of the page + io_index in the low 12 bits */
68 unsigned long phys_offset;
69 /* list of TBs intersecting this physical page */
70 TranslationBlock *first_tb;
71 /* to optimize self modifying code handling, we count the write accesses
72 to a given page; above a threshold we build a bitmap of its code areas */
73 unsigned int code_write_count;
74 uint8_t *code_bitmap;
75 #if defined(CONFIG_USER_ONLY)
76 unsigned long flags;
77 #endif
78 } PageDesc;
79
80 typedef struct VirtPageDesc {
81 /* physical address of code page. It is valid only if 'valid_tag'
82 matches 'virt_valid_tag' */
83 target_ulong phys_addr;
84 unsigned int valid_tag;
85 #if !defined(CONFIG_SOFTMMU)
86 /* original page access rights. It is valid only if 'valid_tag'
87 matches 'virt_valid_tag' */
88 unsigned int prot;
89 #endif
90 } VirtPageDesc;
91
92 #define L2_BITS 10
93 #define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
94
95 #define L1_SIZE (1 << L1_BITS)
96 #define L2_SIZE (1 << L2_BITS)
97
98 static void io_mem_init(void);
99
100 unsigned long real_host_page_size;
101 unsigned long host_page_bits;
102 unsigned long host_page_size;
103 unsigned long host_page_mask;
104
105 static PageDesc *l1_map[L1_SIZE];
106
107 #if !defined(CONFIG_USER_ONLY)
108 static VirtPageDesc *l1_virt_map[L1_SIZE];
109 static unsigned int virt_valid_tag;
110 #endif
111
112 /* io memory support */
113 CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
114 CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
115 static int io_mem_nb;
116
117 /* log support */
118 char *logfilename = "/tmp/qemu.log";
119 FILE *logfile;
120 int loglevel;
121
122 static void page_init(void)
123 {
124 /* NOTE: we can always suppose that host_page_size >=
125 TARGET_PAGE_SIZE */
126 #ifdef _WIN32
127 real_host_page_size = 4096;
128 #else
129 real_host_page_size = getpagesize();
130 #endif
131 if (host_page_size == 0)
132 host_page_size = real_host_page_size;
133 if (host_page_size < TARGET_PAGE_SIZE)
134 host_page_size = TARGET_PAGE_SIZE;
135 host_page_bits = 0;
136 while ((1 << host_page_bits) < host_page_size)
137 host_page_bits++;
138 host_page_mask = ~(host_page_size - 1);
139 #if !defined(CONFIG_USER_ONLY)
140 virt_valid_tag = 1;
141 #endif
142 }
143
144 static inline PageDesc *page_find_alloc(unsigned int index)
145 {
146 PageDesc **lp, *p;
147
148 lp = &l1_map[index >> L2_BITS];
149 p = *lp;
150 if (!p) {
151 /* allocate if not found */
152 p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
153 memset(p, 0, sizeof(PageDesc) * L2_SIZE);
154 *lp = p;
155 }
156 return p + (index & (L2_SIZE - 1));
157 }
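
/* Illustrative example (not part of the original source): assuming a
   32 bit target with TARGET_PAGE_BITS == 12, the table is split into
   L1_BITS == 10 and L2_BITS == 10. For the address 0x12345678:

       page index = 0x12345678 >> TARGET_PAGE_BITS = 0x12345
       L1 index   = 0x12345 >> L2_BITS             = 0x048
       L2 index   = 0x12345 & (L2_SIZE - 1)        = 0x345

   so page_find_alloc(0x12345) allocates the 1024 entry PageDesc array
   for l1_map[0x048] on first use and returns &l1_map[0x048][0x345]. */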
158
159 static inline PageDesc *page_find(unsigned int index)
160 {
161 PageDesc *p;
162
163 p = l1_map[index >> L2_BITS];
164 if (!p)
165             return NULL;
166 return p + (index & (L2_SIZE - 1));
167 }
168
169 #if !defined(CONFIG_USER_ONLY)
170 static void tlb_protect_code(CPUState *env, target_ulong addr);
171 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr);
172
173 static inline VirtPageDesc *virt_page_find_alloc(unsigned int index)
174 {
175 VirtPageDesc **lp, *p;
176
177 lp = &l1_virt_map[index >> L2_BITS];
178 p = *lp;
179 if (!p) {
180 /* allocate if not found */
181 p = qemu_malloc(sizeof(VirtPageDesc) * L2_SIZE);
182 memset(p, 0, sizeof(VirtPageDesc) * L2_SIZE);
183 *lp = p;
184 }
185 return p + (index & (L2_SIZE - 1));
186 }
187
188 static inline VirtPageDesc *virt_page_find(unsigned int index)
189 {
190 VirtPageDesc *p;
191
192 p = l1_virt_map[index >> L2_BITS];
193 if (!p)
194             return NULL;
195 return p + (index & (L2_SIZE - 1));
196 }
197
198 static void virt_page_flush(void)
199 {
200 int i, j;
201 VirtPageDesc *p;
202
203 virt_valid_tag++;
204
205 if (virt_valid_tag == 0) {
206 virt_valid_tag = 1;
207 for(i = 0; i < L1_SIZE; i++) {
208 p = l1_virt_map[i];
209 if (p) {
210 for(j = 0; j < L2_SIZE; j++)
211 p[j].valid_tag = 0;
212 }
213 }
214 }
215 }
216 #else
217 static void virt_page_flush(void)
218 {
219 }
220 #endif
221
222 void cpu_exec_init(void)
223 {
224 if (!code_gen_ptr) {
225 code_gen_ptr = code_gen_buffer;
226 page_init();
227 io_mem_init();
228 }
229 }
230
231 static inline void invalidate_page_bitmap(PageDesc *p)
232 {
233 if (p->code_bitmap) {
234 qemu_free(p->code_bitmap);
235 p->code_bitmap = NULL;
236 }
237 p->code_write_count = 0;
238 }
239
240 /* set to NULL all the 'first_tb' fields in all PageDescs */
241 static void page_flush_tb(void)
242 {
243 int i, j;
244 PageDesc *p;
245
246 for(i = 0; i < L1_SIZE; i++) {
247 p = l1_map[i];
248 if (p) {
249 for(j = 0; j < L2_SIZE; j++) {
250 p->first_tb = NULL;
251 invalidate_page_bitmap(p);
252 p++;
253 }
254 }
255 }
256 }
257
258 /* flush all the translation blocks */
259 /* XXX: tb_flush is currently not thread safe */
260 void tb_flush(CPUState *env)
261 {
262 int i;
263 #if defined(DEBUG_FLUSH)
264 printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
265 code_gen_ptr - code_gen_buffer,
266 nb_tbs,
267 nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
268 #endif
269 nb_tbs = 0;
270 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
271 tb_hash[i] = NULL;
272 virt_page_flush();
273
274 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++)
275 tb_phys_hash[i] = NULL;
276 page_flush_tb();
277
278 code_gen_ptr = code_gen_buffer;
279 /* XXX: flush processor icache at this point if cache flush is
280 expensive */
281 }
282
283 #ifdef DEBUG_TB_CHECK
284
285 static void tb_invalidate_check(unsigned long address)
286 {
287 TranslationBlock *tb;
288 int i;
289 address &= TARGET_PAGE_MASK;
290 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
291 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
292 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
293 address >= tb->pc + tb->size)) {
294 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
295 address, tb->pc, tb->size);
296 }
297 }
298 }
299 }
300
301 /* verify that all the pages have correct rights for code */
302 static void tb_page_check(void)
303 {
304 TranslationBlock *tb;
305 int i, flags1, flags2;
306
307 for(i = 0;i < CODE_GEN_HASH_SIZE; i++) {
308 for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
309 flags1 = page_get_flags(tb->pc);
310 flags2 = page_get_flags(tb->pc + tb->size - 1);
311 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
312 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
313 tb->pc, tb->size, flags1, flags2);
314 }
315 }
316 }
317 }
318
319 void tb_jmp_check(TranslationBlock *tb)
320 {
321 TranslationBlock *tb1;
322 unsigned int n1;
323
324 /* suppress any remaining jumps to this TB */
325 tb1 = tb->jmp_first;
326 for(;;) {
327 n1 = (long)tb1 & 3;
328 tb1 = (TranslationBlock *)((long)tb1 & ~3);
329 if (n1 == 2)
330 break;
331 tb1 = tb1->jmp_next[n1];
332 }
333 /* check end of list */
334 if (tb1 != tb) {
335 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
336 }
337 }
338
339 #endif
340
341 /* invalidate one TB */
342 static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
343 int next_offset)
344 {
345 TranslationBlock *tb1;
346 for(;;) {
347 tb1 = *ptb;
348 if (tb1 == tb) {
349 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
350 break;
351 }
352 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
353 }
354 }
355
356 static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
357 {
358 TranslationBlock *tb1;
359 unsigned int n1;
360
361 for(;;) {
362 tb1 = *ptb;
363 n1 = (long)tb1 & 3;
364 tb1 = (TranslationBlock *)((long)tb1 & ~3);
365 if (tb1 == tb) {
366 *ptb = tb1->page_next[n1];
367 break;
368 }
369 ptb = &tb1->page_next[n1];
370 }
371 }
372
373 static inline void tb_jmp_remove(TranslationBlock *tb, int n)
374 {
375 TranslationBlock *tb1, **ptb;
376 unsigned int n1;
377
378 ptb = &tb->jmp_next[n];
379 tb1 = *ptb;
380 if (tb1) {
381 /* find tb(n) in circular list */
382 for(;;) {
383 tb1 = *ptb;
384 n1 = (long)tb1 & 3;
385 tb1 = (TranslationBlock *)((long)tb1 & ~3);
386 if (n1 == n && tb1 == tb)
387 break;
388 if (n1 == 2) {
389 ptb = &tb1->jmp_first;
390 } else {
391 ptb = &tb1->jmp_next[n1];
392 }
393 }
394 /* now we can suppress tb(n) from the list */
395 *ptb = tb->jmp_next[n];
396
397 tb->jmp_next[n] = NULL;
398 }
399 }
400
401 /* reset the jump entry 'n' of a TB so that it is not chained to
402 another TB */
403 static inline void tb_reset_jump(TranslationBlock *tb, int n)
404 {
405 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
406 }
407
408 static inline void tb_invalidate(TranslationBlock *tb)
409 {
410 unsigned int h, n1;
411 TranslationBlock *tb1, *tb2, **ptb;
412
413 tb_invalidated_flag = 1;
414
415 /* remove the TB from the hash list */
416 h = tb_hash_func(tb->pc);
417 ptb = &tb_hash[h];
418 for(;;) {
419 tb1 = *ptb;
420 /* NOTE: the TB is not necessarily linked in the hash; if it is
421 not found there, it is simply not currently in use */
422 if (tb1 == NULL)
423 return;
424 if (tb1 == tb) {
425 *ptb = tb1->hash_next;
426 break;
427 }
428 ptb = &tb1->hash_next;
429 }
430
431 /* suppress this TB from the two jump lists */
432 tb_jmp_remove(tb, 0);
433 tb_jmp_remove(tb, 1);
434
435 /* suppress any remaining jumps to this TB */
436 tb1 = tb->jmp_first;
437 for(;;) {
438 n1 = (long)tb1 & 3;
439 if (n1 == 2)
440 break;
441 tb1 = (TranslationBlock *)((long)tb1 & ~3);
442 tb2 = tb1->jmp_next[n1];
443 tb_reset_jump(tb1, n1);
444 tb1->jmp_next[n1] = NULL;
445 tb1 = tb2;
446 }
447 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
448 }
449
450 static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
451 {
452 PageDesc *p;
453 unsigned int h;
454 target_ulong phys_pc;
455
456 /* remove the TB from the hash list */
457 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
458 h = tb_phys_hash_func(phys_pc);
459 tb_remove(&tb_phys_hash[h], tb,
460 offsetof(TranslationBlock, phys_hash_next));
461
462 /* remove the TB from the page list */
463 if (tb->page_addr[0] != page_addr) {
464 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
465 tb_page_remove(&p->first_tb, tb);
466 invalidate_page_bitmap(p);
467 }
468 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
469 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
470 tb_page_remove(&p->first_tb, tb);
471 invalidate_page_bitmap(p);
472 }
473
474 tb_invalidate(tb);
475 }
476
477 static inline void set_bits(uint8_t *tab, int start, int len)
478 {
479 int end, mask, end1;
480
481 end = start + len;
482 tab += start >> 3;
483 mask = 0xff << (start & 7);
484 if ((start & ~7) == (end & ~7)) {
485 if (start < end) {
486 mask &= ~(0xff << (end & 7));
487 *tab |= mask;
488 }
489 } else {
490 *tab++ |= mask;
491 start = (start + 8) & ~7;
492 end1 = end & ~7;
493 while (start < end1) {
494 *tab++ = 0xff;
495 start += 8;
496 }
497 if (start < end) {
498 mask = ~(0xff << (end & 7));
499 *tab |= mask;
500 }
501 }
502 }
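
/* Illustrative example (not part of the original source): with 'tab'
   initially zeroed, set_bits(tab, 3, 7) marks bits 3..9:

       tab[0] |= 0xf8;    bits 3..7 of the first byte
       tab[1] |= 0x03;    bits 8..9 of the second byte

   build_page_bitmap() below uses this to record, one bit per byte
   offset, which parts of a physical page are covered by translated
   code. */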
503
504 static void build_page_bitmap(PageDesc *p)
505 {
506 int n, tb_start, tb_end;
507 TranslationBlock *tb;
508
509 p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
510 if (!p->code_bitmap)
511 return;
512 memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
513
514 tb = p->first_tb;
515 while (tb != NULL) {
516 n = (long)tb & 3;
517 tb = (TranslationBlock *)((long)tb & ~3);
518 /* NOTE: this is subtle as a TB may span two physical pages */
519 if (n == 0) {
520 /* NOTE: tb_end may be after the end of the page, but
521 it is not a problem */
522 tb_start = tb->pc & ~TARGET_PAGE_MASK;
523 tb_end = tb_start + tb->size;
524 if (tb_end > TARGET_PAGE_SIZE)
525 tb_end = TARGET_PAGE_SIZE;
526 } else {
527 tb_start = 0;
528 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
529 }
530 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
531 tb = tb->page_next[n];
532 }
533 }
534
535 #ifdef TARGET_HAS_PRECISE_SMC
536
537 static void tb_gen_code(CPUState *env,
538 target_ulong pc, target_ulong cs_base, int flags,
539 int cflags)
540 {
541 TranslationBlock *tb;
542 uint8_t *tc_ptr;
543 target_ulong phys_pc, phys_page2, virt_page2;
544 int code_gen_size;
545
546 phys_pc = get_phys_addr_code(env, (unsigned long)pc);
547 tb = tb_alloc((unsigned long)pc);
548 if (!tb) {
549 /* flush must be done */
550 tb_flush(env);
551 /* cannot fail at this point */
552 tb = tb_alloc((unsigned long)pc);
553 }
554 tc_ptr = code_gen_ptr;
555 tb->tc_ptr = tc_ptr;
556 tb->cs_base = cs_base;
557 tb->flags = flags;
558 tb->cflags = cflags;
559 cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
560 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
561
562 /* check next page if needed */
563 virt_page2 = ((unsigned long)pc + tb->size - 1) & TARGET_PAGE_MASK;
564 phys_page2 = -1;
565 if (((unsigned long)pc & TARGET_PAGE_MASK) != virt_page2) {
566 phys_page2 = get_phys_addr_code(env, virt_page2);
567 }
568 tb_link_phys(tb, phys_pc, phys_page2);
569 }
570 #endif
571
572 /* invalidate all TBs which intersect with the target physical page
573 starting in range [start;end[. NOTE: start and end must refer to
574 the same physical page. 'is_cpu_write_access' should be true if called
575 from a real cpu write access: the virtual CPU will exit the current
576 TB if code is modified inside this TB. */
577 void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
578 int is_cpu_write_access)
579 {
580 int n, current_tb_modified, current_tb_not_found, current_flags;
581 #if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
582 CPUState *env = cpu_single_env;
583 #endif
584 PageDesc *p;
585 TranslationBlock *tb, *tb_next, *current_tb;
586 target_ulong tb_start, tb_end;
587 target_ulong current_pc, current_cs_base;
588
589 p = page_find(start >> TARGET_PAGE_BITS);
590 if (!p)
591 return;
592 if (!p->code_bitmap &&
593 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
594 is_cpu_write_access) {
595 /* build code bitmap */
596 build_page_bitmap(p);
597 }
598
599 /* we remove all the TBs in the range [start, end[ */
600 /* XXX: see if in some cases it could be faster to invalidate all the code */
601 current_tb_not_found = is_cpu_write_access;
602 current_tb_modified = 0;
603 current_tb = NULL; /* avoid warning */
604 current_pc = 0; /* avoid warning */
605 current_cs_base = 0; /* avoid warning */
606 current_flags = 0; /* avoid warning */
607 tb = p->first_tb;
608 while (tb != NULL) {
609 n = (long)tb & 3;
610 tb = (TranslationBlock *)((long)tb & ~3);
611 tb_next = tb->page_next[n];
612 /* NOTE: this is subtle as a TB may span two physical pages */
613 if (n == 0) {
614 /* NOTE: tb_end may be after the end of the page, but
615 it is not a problem */
616 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
617 tb_end = tb_start + tb->size;
618 } else {
619 tb_start = tb->page_addr[1];
620 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
621 }
622 if (!(tb_end <= start || tb_start >= end)) {
623 #ifdef TARGET_HAS_PRECISE_SMC
624 if (current_tb_not_found) {
625 current_tb_not_found = 0;
626 current_tb = NULL;
627 if (env->mem_write_pc) {
628 /* now we have a real cpu fault */
629 current_tb = tb_find_pc(env->mem_write_pc);
630 }
631 }
632 if (current_tb == tb &&
633 !(current_tb->cflags & CF_SINGLE_INSN)) {
634 /* If we are modifying the current TB, we must stop
635 its execution. We could be more precise by checking
636 that the modification is after the current PC, but it
637 would require a specialized function to partially
638 restore the CPU state */
639
640 current_tb_modified = 1;
641 cpu_restore_state(current_tb, env,
642 env->mem_write_pc, NULL);
643 #if defined(TARGET_I386)
644 current_flags = env->hflags;
645 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
646 current_cs_base = (target_ulong)env->segs[R_CS].base;
647 current_pc = current_cs_base + env->eip;
648 #else
649 #error unsupported CPU
650 #endif
651 }
652 #endif /* TARGET_HAS_PRECISE_SMC */
653 tb_phys_invalidate(tb, -1);
654 }
655 tb = tb_next;
656 }
657 #if !defined(CONFIG_USER_ONLY)
658 /* if no code remaining, no need to continue to use slow writes */
659 if (!p->first_tb) {
660 invalidate_page_bitmap(p);
661 if (is_cpu_write_access) {
662 tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
663 }
664 }
665 #endif
666 #ifdef TARGET_HAS_PRECISE_SMC
667 if (current_tb_modified) {
668 /* we generate a block containing just the instruction
669 modifying the memory. It will ensure that it cannot modify
670 itself */
671 tb_gen_code(env, current_pc, current_cs_base, current_flags,
672 CF_SINGLE_INSN);
673 cpu_resume_from_signal(env, NULL);
674 }
675 #endif
676 }
677
678 /* len must be <= 8 and start must be a multiple of len */
679 static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
680 {
681 PageDesc *p;
682 int offset, b;
683 #if 0
684 if (cpu_single_env->cr[0] & CR0_PE_MASK) {
685 printf("modifying code at 0x%x size=%d EIP=%x\n",
686 (vaddr & TARGET_PAGE_MASK) | (start & ~TARGET_PAGE_MASK), len,
687 cpu_single_env->eip);
688 }
689 #endif
690 p = page_find(start >> TARGET_PAGE_BITS);
691 if (!p)
692 return;
693 if (p->code_bitmap) {
694 offset = start & ~TARGET_PAGE_MASK;
695 b = p->code_bitmap[offset >> 3] >> (offset & 7);
696 if (b & ((1 << len) - 1))
697 goto do_invalidate;
698 } else {
699 do_invalidate:
700 tb_invalidate_phys_page_range(start, start + len, 1);
701 }
702 }
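
/* Illustrative example (not part of the original source): for a 4 byte
   write at page offset 0x124, the fast path above reads
   code_bitmap[0x124 >> 3], shifts it right by (0x124 & 7) and tests the
   low 4 bits, i.e. the bits for byte offsets 0x124..0x127. Only if one
   of them is set is the slow tb_invalidate_phys_page_range() taken. */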
703
704 #if !defined(CONFIG_SOFTMMU)
705 static void tb_invalidate_phys_page(target_ulong addr,
706 unsigned long pc, void *puc)
707 {
708 int n, current_flags, current_tb_modified;
709 target_ulong current_pc, current_cs_base;
710 PageDesc *p;
711 TranslationBlock *tb, *current_tb;
712 #ifdef TARGET_HAS_PRECISE_SMC
713 CPUState *env = cpu_single_env;
714 #endif
715
716 addr &= TARGET_PAGE_MASK;
717 p = page_find(addr >> TARGET_PAGE_BITS);
718 if (!p)
719 return;
720 tb = p->first_tb;
721 current_tb_modified = 0;
722 current_tb = NULL;
723 current_pc = 0; /* avoid warning */
724 current_cs_base = 0; /* avoid warning */
725 current_flags = 0; /* avoid warning */
726 #ifdef TARGET_HAS_PRECISE_SMC
727 if (tb && pc != 0) {
728 current_tb = tb_find_pc(pc);
729 }
730 #endif
731 while (tb != NULL) {
732 n = (long)tb & 3;
733 tb = (TranslationBlock *)((long)tb & ~3);
734 #ifdef TARGET_HAS_PRECISE_SMC
735 if (current_tb == tb &&
736 !(current_tb->cflags & CF_SINGLE_INSN)) {
737 /* If we are modifying the current TB, we must stop
738 its execution. We could be more precise by checking
739 that the modification is after the current PC, but it
740 would require a specialized function to partially
741 restore the CPU state */
742
743 current_tb_modified = 1;
744 cpu_restore_state(current_tb, env, pc, puc);
745 #if defined(TARGET_I386)
746 current_flags = env->hflags;
747 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
748 current_cs_base = (target_ulong)env->segs[R_CS].base;
749 current_pc = current_cs_base + env->eip;
750 #else
751 #error unsupported CPU
752 #endif
753 }
754 #endif /* TARGET_HAS_PRECISE_SMC */
755 tb_phys_invalidate(tb, addr);
756 tb = tb->page_next[n];
757 }
758 p->first_tb = NULL;
759 #ifdef TARGET_HAS_PRECISE_SMC
760 if (current_tb_modified) {
761 /* we generate a block containing just the instruction
762 modifying the memory. It will ensure that it cannot modify
763 itself */
764 tb_gen_code(env, current_pc, current_cs_base, current_flags,
765 CF_SINGLE_INSN);
766 cpu_resume_from_signal(env, puc);
767 }
768 #endif
769 }
770 #endif
771
772 /* add the tb in the target page and protect it if necessary */
773 static inline void tb_alloc_page(TranslationBlock *tb,
774 unsigned int n, unsigned int page_addr)
775 {
776 PageDesc *p;
777 TranslationBlock *last_first_tb;
778
779 tb->page_addr[n] = page_addr;
780 p = page_find(page_addr >> TARGET_PAGE_BITS);
781 tb->page_next[n] = p->first_tb;
782 last_first_tb = p->first_tb;
783 p->first_tb = (TranslationBlock *)((long)tb | n);
784 invalidate_page_bitmap(p);
785
786 #ifdef TARGET_HAS_SMC
787
788 #if defined(CONFIG_USER_ONLY)
789 if (p->flags & PAGE_WRITE) {
790 unsigned long host_start, host_end, addr;
791 int prot;
792
793 /* force the host page as non writable (writes will have a
794 page fault + mprotect overhead) */
795 host_start = page_addr & host_page_mask;
796 host_end = host_start + host_page_size;
797 prot = 0;
798 for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
799 prot |= page_get_flags(addr);
800 mprotect((void *)host_start, host_page_size,
801 (prot & PAGE_BITS) & ~PAGE_WRITE);
802 #ifdef DEBUG_TB_INVALIDATE
803 printf("protecting code page: 0x%08lx\n",
804 host_start);
805 #endif
806 p->flags &= ~PAGE_WRITE;
807 }
808 #else
809 /* if some code is already present, then the pages are already
810 protected. So we handle the case where only the first TB is
811 allocated in a physical page */
812 if (!last_first_tb) {
813 target_ulong virt_addr;
814
815 virt_addr = (tb->pc & TARGET_PAGE_MASK) + (n << TARGET_PAGE_BITS);
816 tlb_protect_code(cpu_single_env, virt_addr);
817 }
818 #endif
819
820 #endif /* TARGET_HAS_SMC */
821 }
822
823 /* Allocate a new translation block. Flush the translation buffer if
824 too many translation blocks or too much generated code. */
825 TranslationBlock *tb_alloc(unsigned long pc)
826 {
827 TranslationBlock *tb;
828
829 if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
830 (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
831 return NULL;
832 tb = &tbs[nb_tbs++];
833 tb->pc = pc;
834 tb->cflags = 0;
835 return tb;
836 }
837
838 /* add a new TB and link it to the physical page tables. phys_page2 is
839 (-1) to indicate that only one page contains the TB. */
840 void tb_link_phys(TranslationBlock *tb,
841 target_ulong phys_pc, target_ulong phys_page2)
842 {
843 unsigned int h;
844 TranslationBlock **ptb;
845
846 /* add in the physical hash table */
847 h = tb_phys_hash_func(phys_pc);
848 ptb = &tb_phys_hash[h];
849 tb->phys_hash_next = *ptb;
850 *ptb = tb;
851
852 /* add in the page list */
853 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
854 if (phys_page2 != -1)
855 tb_alloc_page(tb, 1, phys_page2);
856 else
857 tb->page_addr[1] = -1;
858 #ifdef DEBUG_TB_CHECK
859 tb_page_check();
860 #endif
861 }
862
863 /* link the tb with the other TBs */
864 void tb_link(TranslationBlock *tb)
865 {
866 #if !defined(CONFIG_USER_ONLY)
867 {
868 VirtPageDesc *vp;
869 target_ulong addr;
870
871 /* save the code memory mappings (needed to invalidate the code) */
872 addr = tb->pc & TARGET_PAGE_MASK;
873 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
874 #ifdef DEBUG_TLB_CHECK
875 if (vp->valid_tag == virt_valid_tag &&
876 vp->phys_addr != tb->page_addr[0]) {
877 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
878 addr, tb->page_addr[0], vp->phys_addr);
879 }
880 #endif
881 vp->phys_addr = tb->page_addr[0];
882 if (vp->valid_tag != virt_valid_tag) {
883 vp->valid_tag = virt_valid_tag;
884 #if !defined(CONFIG_SOFTMMU)
885 vp->prot = 0;
886 #endif
887 }
888
889 if (tb->page_addr[1] != -1) {
890 addr += TARGET_PAGE_SIZE;
891 vp = virt_page_find_alloc(addr >> TARGET_PAGE_BITS);
892 #ifdef DEBUG_TLB_CHECK
893 if (vp->valid_tag == virt_valid_tag &&
894 vp->phys_addr != tb->page_addr[1]) {
895 printf("Error tb addr=0x%x phys=0x%x vp->phys_addr=0x%x\n",
896 addr, tb->page_addr[1], vp->phys_addr);
897 }
898 #endif
899 vp->phys_addr = tb->page_addr[1];
900 if (vp->valid_tag != virt_valid_tag) {
901 vp->valid_tag = virt_valid_tag;
902 #if !defined(CONFIG_SOFTMMU)
903 vp->prot = 0;
904 #endif
905 }
906 }
907 }
908 #endif
909
910 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
911 tb->jmp_next[0] = NULL;
912 tb->jmp_next[1] = NULL;
913 #ifdef USE_CODE_COPY
914 tb->cflags &= ~CF_FP_USED;
915 if (tb->cflags & CF_TB_FP_USED)
916 tb->cflags |= CF_FP_USED;
917 #endif
918
919 /* init original jump addresses */
920 if (tb->tb_next_offset[0] != 0xffff)
921 tb_reset_jump(tb, 0);
922 if (tb->tb_next_offset[1] != 0xffff)
923 tb_reset_jump(tb, 1);
924 }
925
926 /* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
927    (tb + 1)->tc_ptr. Return NULL if not found */
928 TranslationBlock *tb_find_pc(unsigned long tc_ptr)
929 {
930 int m_min, m_max, m;
931 unsigned long v;
932 TranslationBlock *tb;
933
934 if (nb_tbs <= 0)
935 return NULL;
936 if (tc_ptr < (unsigned long)code_gen_buffer ||
937 tc_ptr >= (unsigned long)code_gen_ptr)
938 return NULL;
939 /* binary search (cf Knuth) */
940 m_min = 0;
941 m_max = nb_tbs - 1;
942 while (m_min <= m_max) {
943 m = (m_min + m_max) >> 1;
944 tb = &tbs[m];
945 v = (unsigned long)tb->tc_ptr;
946 if (v == tc_ptr)
947 return tb;
948 else if (tc_ptr < v) {
949 m_max = m - 1;
950 } else {
951 m_min = m + 1;
952 }
953 }
954 return &tbs[m_max];
955 }
956
957 static void tb_reset_jump_recursive(TranslationBlock *tb);
958
959 static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
960 {
961 TranslationBlock *tb1, *tb_next, **ptb;
962 unsigned int n1;
963
964 tb1 = tb->jmp_next[n];
965 if (tb1 != NULL) {
966 /* find head of list */
967 for(;;) {
968 n1 = (long)tb1 & 3;
969 tb1 = (TranslationBlock *)((long)tb1 & ~3);
970 if (n1 == 2)
971 break;
972 tb1 = tb1->jmp_next[n1];
973 }
974         /* we are now sure that tb jumps to tb1 */
975 tb_next = tb1;
976
977 /* remove tb from the jmp_first list */
978 ptb = &tb_next->jmp_first;
979 for(;;) {
980 tb1 = *ptb;
981 n1 = (long)tb1 & 3;
982 tb1 = (TranslationBlock *)((long)tb1 & ~3);
983 if (n1 == n && tb1 == tb)
984 break;
985 ptb = &tb1->jmp_next[n1];
986 }
987 *ptb = tb->jmp_next[n];
988 tb->jmp_next[n] = NULL;
989
990 /* suppress the jump to next tb in generated code */
991 tb_reset_jump(tb, n);
992
993 /* suppress jumps in the tb on which we could have jumped */
994 tb_reset_jump_recursive(tb_next);
995 }
996 }
997
998 static void tb_reset_jump_recursive(TranslationBlock *tb)
999 {
1000 tb_reset_jump_recursive2(tb, 0);
1001 tb_reset_jump_recursive2(tb, 1);
1002 }
1003
1004 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1005 {
1006 target_ulong phys_addr;
1007
1008 phys_addr = cpu_get_phys_page_debug(env, pc);
1009 tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
1010 }
1011
1012 /* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1013 breakpoint is reached */
1014 int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1015 {
1016 #if defined(TARGET_I386) || defined(TARGET_PPC)
1017 int i;
1018
1019 for(i = 0; i < env->nb_breakpoints; i++) {
1020 if (env->breakpoints[i] == pc)
1021 return 0;
1022 }
1023
1024 if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1025 return -1;
1026 env->breakpoints[env->nb_breakpoints++] = pc;
1027
1028 breakpoint_invalidate(env, pc);
1029 return 0;
1030 #else
1031 return -1;
1032 #endif
1033 }
1034
1035 /* remove a breakpoint */
1036 int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1037 {
1038 #if defined(TARGET_I386) || defined(TARGET_PPC)
1039 int i;
1040 for(i = 0; i < env->nb_breakpoints; i++) {
1041 if (env->breakpoints[i] == pc)
1042 goto found;
1043 }
1044 return -1;
1045 found:
1046 memmove(&env->breakpoints[i], &env->breakpoints[i + 1],
1047 (env->nb_breakpoints - (i + 1)) * sizeof(env->breakpoints[0]));
1048 env->nb_breakpoints--;
1049
1050 breakpoint_invalidate(env, pc);
1051 return 0;
1052 #else
1053 return -1;
1054 #endif
1055 }
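
/* Usage sketch (not part of the original source; cpu_exec() stands for
   the target CPU execution loop and the helper name is an assumption):
   a debugger front end typically inserts a breakpoint, runs until the
   CPU loop returns EXCP_DEBUG and then removes it again. */
#if 0
static void run_until_breakpoint(CPUState *env, target_ulong pc)
{
    if (cpu_breakpoint_insert(env, pc) < 0)
        return;                        /* breakpoint table is full */
    while (cpu_exec(env) != EXCP_DEBUG) {
        /* keep executing until the breakpoint (or single step) hits */
    }
    cpu_breakpoint_remove(env, pc);
}
#endif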
1056
1057 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1058 CPU loop after each instruction */
1059 void cpu_single_step(CPUState *env, int enabled)
1060 {
1061 #if defined(TARGET_I386) || defined(TARGET_PPC)
1062 if (env->singlestep_enabled != enabled) {
1063 env->singlestep_enabled = enabled;
1064         /* must flush all the translated code to avoid inconsistencies */
1065 /* XXX: only flush what is necessary */
1066 tb_flush(env);
1067 }
1068 #endif
1069 }
1070
1071 /* enable or disable low level logging */
1072 void cpu_set_log(int log_flags)
1073 {
1074 loglevel = log_flags;
1075 if (loglevel && !logfile) {
1076 logfile = fopen(logfilename, "w");
1077 if (!logfile) {
1078 perror(logfilename);
1079 _exit(1);
1080 }
1081 #if !defined(CONFIG_SOFTMMU)
1082 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1083 {
1084 static uint8_t logfile_buf[4096];
1085 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1086 }
1087 #else
1088 setvbuf(logfile, NULL, _IOLBF, 0);
1089 #endif
1090 }
1091 }
1092
1093 void cpu_set_log_filename(const char *filename)
1094 {
1095 logfilename = strdup(filename);
1096 }
1097
1098 /* mask must never be zero, except for A20 change call */
1099 void cpu_interrupt(CPUState *env, int mask)
1100 {
1101 TranslationBlock *tb;
1102 static int interrupt_lock;
1103
1104 env->interrupt_request |= mask;
1105     /* if the cpu is currently executing code, we must unlink it and
1106        all the potentially executing TBs */
1107 tb = env->current_tb;
1108 if (tb && !testandset(&interrupt_lock)) {
1109 env->current_tb = NULL;
1110 tb_reset_jump_recursive(tb);
1111 interrupt_lock = 0;
1112 }
1113 }
1114
1115 void cpu_reset_interrupt(CPUState *env, int mask)
1116 {
1117 env->interrupt_request &= ~mask;
1118 }
1119
1120 CPULogItem cpu_log_items[] = {
1121 { CPU_LOG_TB_OUT_ASM, "out_asm",
1122 "show generated host assembly code for each compiled TB" },
1123 { CPU_LOG_TB_IN_ASM, "in_asm",
1124 "show target assembly code for each compiled TB" },
1125 { CPU_LOG_TB_OP, "op",
1126 "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1127 #ifdef TARGET_I386
1128 { CPU_LOG_TB_OP_OPT, "op_opt",
1129 "show micro ops after optimization for each compiled TB" },
1130 #endif
1131 { CPU_LOG_INT, "int",
1132 "show interrupts/exceptions in short format" },
1133 { CPU_LOG_EXEC, "exec",
1134 "show trace before each executed TB (lots of logs)" },
1135 { CPU_LOG_TB_CPU, "cpu",
1136 "show CPU state before bloc translation" },
1137 #ifdef TARGET_I386
1138 { CPU_LOG_PCALL, "pcall",
1139 "show protected mode far calls/returns/exceptions" },
1140 #endif
1141 { CPU_LOG_IOPORT, "ioport",
1142 "show all i/o ports accesses" },
1143 { 0, NULL, NULL },
1144 };
1145
1146 static int cmp1(const char *s1, int n, const char *s2)
1147 {
1148 if (strlen(s2) != n)
1149 return 0;
1150 return memcmp(s1, s2, n) == 0;
1151 }
1152
1153 /* takes a comma separated list of log masks. Returns 0 on error. */
1154 int cpu_str_to_log_mask(const char *str)
1155 {
1156 CPULogItem *item;
1157 int mask;
1158 const char *p, *p1;
1159
1160 p = str;
1161 mask = 0;
1162 for(;;) {
1163 p1 = strchr(p, ',');
1164 if (!p1)
1165 p1 = p + strlen(p);
1166 for(item = cpu_log_items; item->mask != 0; item++) {
1167 if (cmp1(p, p1 - p, item->name))
1168 goto found;
1169 }
1170 return 0;
1171 found:
1172 mask |= item->mask;
1173 if (*p1 != ',')
1174 break;
1175 p = p1 + 1;
1176 }
1177 return mask;
1178 }
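
/* Usage sketch (not part of the original source): the log mask string
   usually comes from a command line option such as "-d in_asm,exec". */
#if 0
static void enable_logging(const char *arg)
{
    int mask;

    cpu_set_log_filename("/tmp/qemu.log");
    mask = cpu_str_to_log_mask(arg);
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif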
1179
1180 void cpu_abort(CPUState *env, const char *fmt, ...)
1181 {
1182 va_list ap;
1183
1184 va_start(ap, fmt);
1185 fprintf(stderr, "qemu: fatal: ");
1186 vfprintf(stderr, fmt, ap);
1187 fprintf(stderr, "\n");
1188 #ifdef TARGET_I386
1189 cpu_x86_dump_state(env, stderr, X86_DUMP_FPU | X86_DUMP_CCOP);
1190 #endif
1191 va_end(ap);
1192 abort();
1193 }
1194
1195 #if !defined(CONFIG_USER_ONLY)
1196
1197 /* NOTE: if flush_global is true, also flush global entries (not
1198 implemented yet) */
1199 void tlb_flush(CPUState *env, int flush_global)
1200 {
1201 int i;
1202
1203 #if defined(DEBUG_TLB)
1204 printf("tlb_flush:\n");
1205 #endif
1206 /* must reset current TB so that interrupts cannot modify the
1207 links while we are modifying them */
1208 env->current_tb = NULL;
1209
1210 for(i = 0; i < CPU_TLB_SIZE; i++) {
1211 env->tlb_read[0][i].address = -1;
1212 env->tlb_write[0][i].address = -1;
1213 env->tlb_read[1][i].address = -1;
1214 env->tlb_write[1][i].address = -1;
1215 }
1216
1217 virt_page_flush();
1218 for(i = 0;i < CODE_GEN_HASH_SIZE; i++)
1219 tb_hash[i] = NULL;
1220
1221 #if !defined(CONFIG_SOFTMMU)
1222 munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1223 #endif
1224 }
1225
1226 static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1227 {
1228 if (addr == (tlb_entry->address &
1229 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
1230 tlb_entry->address = -1;
1231 }
1232
1233 void tlb_flush_page(CPUState *env, target_ulong addr)
1234 {
1235 int i, n;
1236 VirtPageDesc *vp;
1237 PageDesc *p;
1238 TranslationBlock *tb;
1239
1240 #if defined(DEBUG_TLB)
1241 printf("tlb_flush_page: 0x%08x\n", addr);
1242 #endif
1243 /* must reset current TB so that interrupts cannot modify the
1244 links while we are modifying them */
1245 env->current_tb = NULL;
1246
1247 addr &= TARGET_PAGE_MASK;
1248 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1249 tlb_flush_entry(&env->tlb_read[0][i], addr);
1250 tlb_flush_entry(&env->tlb_write[0][i], addr);
1251 tlb_flush_entry(&env->tlb_read[1][i], addr);
1252 tlb_flush_entry(&env->tlb_write[1][i], addr);
1253
1254     /* remove from the virtual pc hash table all the TBs at this
1255        virtual address */
1256
1257 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1258 if (vp && vp->valid_tag == virt_valid_tag) {
1259 p = page_find(vp->phys_addr >> TARGET_PAGE_BITS);
1260 if (p) {
1261 /* we remove all the links to the TBs in this virtual page */
1262 tb = p->first_tb;
1263 while (tb != NULL) {
1264 n = (long)tb & 3;
1265 tb = (TranslationBlock *)((long)tb & ~3);
1266 if ((tb->pc & TARGET_PAGE_MASK) == addr ||
1267 ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr) {
1268 tb_invalidate(tb);
1269 }
1270 tb = tb->page_next[n];
1271 }
1272 }
1273 vp->valid_tag = 0;
1274 }
1275
1276 #if !defined(CONFIG_SOFTMMU)
1277 if (addr < MMAP_AREA_END)
1278 munmap((void *)addr, TARGET_PAGE_SIZE);
1279 #endif
1280 }
1281
1282 static inline void tlb_protect_code1(CPUTLBEntry *tlb_entry, target_ulong addr)
1283 {
1284 if (addr == (tlb_entry->address &
1285 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
1286 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_CODE &&
1287 (tlb_entry->address & ~TARGET_PAGE_MASK) != IO_MEM_ROM) {
1288 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_CODE;
1289 }
1290 }
1291
1292 /* update the TLBs so that writes to code in the virtual page 'addr'
1293 can be detected */
1294 static void tlb_protect_code(CPUState *env, target_ulong addr)
1295 {
1296 int i;
1297
1298 addr &= TARGET_PAGE_MASK;
1299 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1300 tlb_protect_code1(&env->tlb_write[0][i], addr);
1301 tlb_protect_code1(&env->tlb_write[1][i], addr);
1302 #if !defined(CONFIG_SOFTMMU)
1303 /* NOTE: as we generated the code for this page, it is already at
1304 least readable */
1305 if (addr < MMAP_AREA_END)
1306 mprotect((void *)addr, TARGET_PAGE_SIZE, PROT_READ);
1307 #endif
1308 }
1309
1310 static inline void tlb_unprotect_code2(CPUTLBEntry *tlb_entry,
1311 unsigned long phys_addr)
1312 {
1313 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_CODE &&
1314 ((tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend) == phys_addr) {
1315 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1316 }
1317 }
1318
1319 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1320    tested for self modifying code */
1321 static void tlb_unprotect_code_phys(CPUState *env, unsigned long phys_addr, target_ulong vaddr)
1322 {
1323 int i;
1324
1325 phys_addr &= TARGET_PAGE_MASK;
1326 phys_addr += (long)phys_ram_base;
1327 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1328 tlb_unprotect_code2(&env->tlb_write[0][i], phys_addr);
1329 tlb_unprotect_code2(&env->tlb_write[1][i], phys_addr);
1330 }
1331
1332 static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1333 unsigned long start, unsigned long length)
1334 {
1335 unsigned long addr;
1336 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1337 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1338 if ((addr - start) < length) {
1339 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1340 }
1341 }
1342 }
1343
1344 void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end)
1345 {
1346 CPUState *env;
1347 unsigned long length, start1;
1348 int i;
1349
1350 start &= TARGET_PAGE_MASK;
1351 end = TARGET_PAGE_ALIGN(end);
1352
1353 length = end - start;
1354 if (length == 0)
1355 return;
1356 memset(phys_ram_dirty + (start >> TARGET_PAGE_BITS), 0, length >> TARGET_PAGE_BITS);
1357
1358 env = cpu_single_env;
1359 /* we modify the TLB cache so that the dirty bit will be set again
1360 when accessing the range */
1361 start1 = start + (unsigned long)phys_ram_base;
1362 for(i = 0; i < CPU_TLB_SIZE; i++)
1363 tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
1364 for(i = 0; i < CPU_TLB_SIZE; i++)
1365 tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
1366
1367 #if !defined(CONFIG_SOFTMMU)
1368 /* XXX: this is expensive */
1369 {
1370 VirtPageDesc *p;
1371 int j;
1372 target_ulong addr;
1373
1374 for(i = 0; i < L1_SIZE; i++) {
1375 p = l1_virt_map[i];
1376 if (p) {
1377 addr = i << (TARGET_PAGE_BITS + L2_BITS);
1378 for(j = 0; j < L2_SIZE; j++) {
1379 if (p->valid_tag == virt_valid_tag &&
1380 p->phys_addr >= start && p->phys_addr < end &&
1381 (p->prot & PROT_WRITE)) {
1382 if (addr < MMAP_AREA_END) {
1383 mprotect((void *)addr, TARGET_PAGE_SIZE,
1384 p->prot & ~PROT_WRITE);
1385 }
1386 }
1387 addr += TARGET_PAGE_SIZE;
1388 p++;
1389 }
1390 }
1391 }
1392 }
1393 #endif
1394 }
1395
1396 static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1397 unsigned long start)
1398 {
1399 unsigned long addr;
1400 if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1401 addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
1402 if (addr == start) {
1403 tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
1404 }
1405 }
1406 }
1407
1408 /* update the TLB corresponding to virtual page vaddr and phys addr
1409 addr so that it is no longer dirty */
1410 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1411 {
1412 CPUState *env = cpu_single_env;
1413 int i;
1414
1415 phys_ram_dirty[(addr - (unsigned long)phys_ram_base) >> TARGET_PAGE_BITS] = 1;
1416
1417 addr &= TARGET_PAGE_MASK;
1418 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1419 tlb_set_dirty1(&env->tlb_write[0][i], addr);
1420 tlb_set_dirty1(&env->tlb_write[1][i], addr);
1421 }
1422
1423 /* add a new TLB entry. At most one entry for a given virtual address
1424 is permitted. Return 0 if OK or 2 if the page could not be mapped
1425 (can only happen in non SOFTMMU mode for I/O pages or pages
1426 conflicting with the host address space). */
1427 int tlb_set_page(CPUState *env, target_ulong vaddr,
1428 target_phys_addr_t paddr, int prot,
1429 int is_user, int is_softmmu)
1430 {
1431 PageDesc *p;
1432 unsigned long pd;
1433 TranslationBlock *first_tb;
1434 unsigned int index;
1435 target_ulong address;
1436 unsigned long addend;
1437 int ret;
1438
1439 p = page_find(paddr >> TARGET_PAGE_BITS);
1440 if (!p) {
1441 pd = IO_MEM_UNASSIGNED;
1442 first_tb = NULL;
1443 } else {
1444 pd = p->phys_offset;
1445 first_tb = p->first_tb;
1446 }
1447 #if defined(DEBUG_TLB)
1448 printf("tlb_set_page: vaddr=0x%08x paddr=0x%08x prot=%x u=%d c=%d smmu=%d pd=0x%08x\n",
1449 vaddr, paddr, prot, is_user, (first_tb != NULL), is_softmmu, pd);
1450 #endif
1451
1452 ret = 0;
1453 #if !defined(CONFIG_SOFTMMU)
1454 if (is_softmmu)
1455 #endif
1456 {
1457 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1458 /* IO memory case */
1459 address = vaddr | pd;
1460 addend = paddr;
1461 } else {
1462 /* standard memory */
1463 address = vaddr;
1464 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1465 }
1466
1467 index = (vaddr >> 12) & (CPU_TLB_SIZE - 1);
1468 addend -= vaddr;
1469 if (prot & PAGE_READ) {
1470 env->tlb_read[is_user][index].address = address;
1471 env->tlb_read[is_user][index].addend = addend;
1472 } else {
1473 env->tlb_read[is_user][index].address = -1;
1474 env->tlb_read[is_user][index].addend = -1;
1475 }
1476 if (prot & PAGE_WRITE) {
1477 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
1478 /* ROM: access is ignored (same as unassigned) */
1479 env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
1480 env->tlb_write[is_user][index].addend = addend;
1481 } else
1482 /* XXX: the PowerPC code seems not ready to handle
1483 self modifying code with DCBI */
1484 #if defined(TARGET_HAS_SMC) || 1
1485 if (first_tb) {
1486 /* if code is present, we use a specific memory
1487 handler. It works only for physical memory access */
1488 env->tlb_write[is_user][index].address = vaddr | IO_MEM_CODE;
1489 env->tlb_write[is_user][index].addend = addend;
1490 } else
1491 #endif
1492 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1493 !cpu_physical_memory_is_dirty(pd)) {
1494 env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
1495 env->tlb_write[is_user][index].addend = addend;
1496 } else {
1497 env->tlb_write[is_user][index].address = address;
1498 env->tlb_write[is_user][index].addend = addend;
1499 }
1500 } else {
1501 env->tlb_write[is_user][index].address = -1;
1502 env->tlb_write[is_user][index].addend = -1;
1503 }
1504 }
1505 #if !defined(CONFIG_SOFTMMU)
1506 else {
1507 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1508 /* IO access: no mapping is done as it will be handled by the
1509 soft MMU */
1510 if (!(env->hflags & HF_SOFTMMU_MASK))
1511 ret = 2;
1512 } else {
1513 void *map_addr;
1514
1515 if (vaddr >= MMAP_AREA_END) {
1516 ret = 2;
1517 } else {
1518 if (prot & PROT_WRITE) {
1519 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1520 #if defined(TARGET_HAS_SMC) || 1
1521 first_tb ||
1522 #endif
1523 ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1524 !cpu_physical_memory_is_dirty(pd))) {
1525 /* ROM: we do as if code was inside */
1526 /* if code is present, we only map as read only and save the
1527 original mapping */
1528 VirtPageDesc *vp;
1529
1530 vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS);
1531 vp->phys_addr = pd;
1532 vp->prot = prot;
1533 vp->valid_tag = virt_valid_tag;
1534 prot &= ~PAGE_WRITE;
1535 }
1536 }
1537 map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1538 MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1539 if (map_addr == MAP_FAILED) {
1540                 cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1541 paddr, vaddr);
1542 }
1543 }
1544 }
1545 }
1546 #endif
1547 return ret;
1548 }
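
/* Usage sketch (not part of the original source; the handler name and
   walk_page_tables() are assumptions): a target MMU fault handler
   translates the faulting virtual address with its page table walker
   and then installs the mapping with tlb_set_page(). */
#if 0
int cpu_handle_mmu_fault(CPUState *env, target_ulong vaddr,
                         int is_write, int is_user, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    /* target specific page table walk, not shown */
    if (walk_page_tables(env, vaddr, is_write, is_user, &paddr, &prot) < 0)
        return 1;   /* let the caller raise the target page fault */

    return tlb_set_page(env, vaddr & TARGET_PAGE_MASK,
                        paddr & TARGET_PAGE_MASK, prot,
                        is_user, is_softmmu);
}
#endif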
1549
1550 /* called from signal handler: invalidate the code and unprotect the
1551    page. Return TRUE if the fault was successfully handled. */
1552 int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
1553 {
1554 #if !defined(CONFIG_SOFTMMU)
1555 VirtPageDesc *vp;
1556
1557 #if defined(DEBUG_TLB)
1558 printf("page_unprotect: addr=0x%08x\n", addr);
1559 #endif
1560 addr &= TARGET_PAGE_MASK;
1561
1562 /* if it is not mapped, no need to worry here */
1563 if (addr >= MMAP_AREA_END)
1564 return 0;
1565 vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1566 if (!vp)
1567 return 0;
1568 /* NOTE: in this case, validate_tag is _not_ tested as it
1569 validates only the code TLB */
1570 if (vp->valid_tag != virt_valid_tag)
1571 return 0;
1572 if (!(vp->prot & PAGE_WRITE))
1573 return 0;
1574 #if defined(DEBUG_TLB)
1575 printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1576 addr, vp->phys_addr, vp->prot);
1577 #endif
1578 if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1579 cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1580 (unsigned long)addr, vp->prot);
1581 /* set the dirty bit */
1582 phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 1;
1583 /* flush the code inside */
1584 tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1585 return 1;
1586 #else
1587 return 0;
1588 #endif
1589 }
1590
1591 #else
1592
1593 void tlb_flush(CPUState *env, int flush_global)
1594 {
1595 }
1596
1597 void tlb_flush_page(CPUState *env, target_ulong addr)
1598 {
1599 }
1600
1601 int tlb_set_page(CPUState *env, target_ulong vaddr,
1602 target_phys_addr_t paddr, int prot,
1603 int is_user, int is_softmmu)
1604 {
1605 return 0;
1606 }
1607
1608 /* dump memory mappings */
1609 void page_dump(FILE *f)
1610 {
1611 unsigned long start, end;
1612 int i, j, prot, prot1;
1613 PageDesc *p;
1614
1615 fprintf(f, "%-8s %-8s %-8s %s\n",
1616 "start", "end", "size", "prot");
1617 start = -1;
1618 end = -1;
1619 prot = 0;
1620 for(i = 0; i <= L1_SIZE; i++) {
1621 if (i < L1_SIZE)
1622 p = l1_map[i];
1623 else
1624 p = NULL;
1625 for(j = 0;j < L2_SIZE; j++) {
1626 if (!p)
1627 prot1 = 0;
1628 else
1629 prot1 = p[j].flags;
1630 if (prot1 != prot) {
1631 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1632 if (start != -1) {
1633 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1634 start, end, end - start,
1635 prot & PAGE_READ ? 'r' : '-',
1636 prot & PAGE_WRITE ? 'w' : '-',
1637 prot & PAGE_EXEC ? 'x' : '-');
1638 }
1639 if (prot1 != 0)
1640 start = end;
1641 else
1642 start = -1;
1643 prot = prot1;
1644 }
1645 if (!p)
1646 break;
1647 }
1648 }
1649 }
1650
1651 int page_get_flags(unsigned long address)
1652 {
1653 PageDesc *p;
1654
1655 p = page_find(address >> TARGET_PAGE_BITS);
1656 if (!p)
1657 return 0;
1658 return p->flags;
1659 }
1660
1661 /* modify the flags of a page and invalidate the code if
1662    necessary. The flag PAGE_WRITE_ORG is positioned automatically
1663 depending on PAGE_WRITE */
1664 void page_set_flags(unsigned long start, unsigned long end, int flags)
1665 {
1666 PageDesc *p;
1667 unsigned long addr;
1668
1669 start = start & TARGET_PAGE_MASK;
1670 end = TARGET_PAGE_ALIGN(end);
1671 if (flags & PAGE_WRITE)
1672 flags |= PAGE_WRITE_ORG;
1673 spin_lock(&tb_lock);
1674 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1675 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1676         /* if write access is being enabled on a page containing code,
1677            we must invalidate that code */
1678 if (!(p->flags & PAGE_WRITE) &&
1679 (flags & PAGE_WRITE) &&
1680 p->first_tb) {
1681 tb_invalidate_phys_page(addr, 0, NULL);
1682 }
1683 p->flags = flags;
1684 }
1685 spin_unlock(&tb_lock);
1686 }
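
/* Usage sketch (not part of the original source; the helper name is an
   assumption): the user mode mmap()/mprotect() emulation records guest
   page protections so that page_get_flags() and page_unprotect() see
   them. */
#if 0
static void record_guest_mapping(unsigned long start, unsigned long len,
                                 int host_prot)
{
    int flags = PAGE_VALID;

    if (host_prot & PROT_READ)
        flags |= PAGE_READ;
    if (host_prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (host_prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}
#endif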
1687
1688 /* called from signal handler: invalidate the code and unprotect the
1689    page. Return TRUE if the fault was successfully handled. */
1690 int page_unprotect(unsigned long address, unsigned long pc, void *puc)
1691 {
1692 unsigned int page_index, prot, pindex;
1693 PageDesc *p, *p1;
1694 unsigned long host_start, host_end, addr;
1695
1696 host_start = address & host_page_mask;
1697 page_index = host_start >> TARGET_PAGE_BITS;
1698 p1 = page_find(page_index);
1699 if (!p1)
1700 return 0;
1701 host_end = host_start + host_page_size;
1702 p = p1;
1703 prot = 0;
1704 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1705 prot |= p->flags;
1706 p++;
1707 }
1708 /* if the page was really writable, then we change its
1709 protection back to writable */
1710 if (prot & PAGE_WRITE_ORG) {
1711 pindex = (address - host_start) >> TARGET_PAGE_BITS;
1712 if (!(p1[pindex].flags & PAGE_WRITE)) {
1713 mprotect((void *)host_start, host_page_size,
1714 (prot & PAGE_BITS) | PAGE_WRITE);
1715 p1[pindex].flags |= PAGE_WRITE;
1716 /* and since the content will be modified, we must invalidate
1717 the corresponding translated code. */
1718 tb_invalidate_phys_page(address, pc, puc);
1719 #ifdef DEBUG_TB_CHECK
1720 tb_invalidate_check(address);
1721 #endif
1722 return 1;
1723 }
1724 }
1725 return 0;
1726 }
1727
1728 /* call this function when system calls directly modify a memory area */
1729 void page_unprotect_range(uint8_t *data, unsigned long data_size)
1730 {
1731 unsigned long start, end, addr;
1732
1733 start = (unsigned long)data;
1734 end = start + data_size;
1735 start &= TARGET_PAGE_MASK;
1736 end = TARGET_PAGE_ALIGN(end);
1737 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1738 page_unprotect(addr, 0, NULL);
1739 }
1740 }
1741
1742 static inline void tlb_set_dirty(unsigned long addr, target_ulong vaddr)
1743 {
1744 }
1745 #endif /* defined(CONFIG_USER_ONLY) */
1746
1747 /* register physical memory. 'size' must be a multiple of the target
1748 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
1749 io memory page */
1750 void cpu_register_physical_memory(target_phys_addr_t start_addr,
1751 unsigned long size,
1752 unsigned long phys_offset)
1753 {
1754 unsigned long addr, end_addr;
1755 PageDesc *p;
1756
1757 end_addr = start_addr + size;
1758 for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
1759 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1760 p->phys_offset = phys_offset;
1761 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
1762 phys_offset += TARGET_PAGE_SIZE;
1763 }
1764 }
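
/* Usage sketch (not part of the original source; the addresses, sizes
   and 'bios_offset' are assumptions): a machine init function maps
   guest RAM starting at offset 0 of phys_ram_base and marks a BIOS
   area as ROM so that guest writes to it are ignored. */
#if 0
static void machine_mem_init(unsigned long ram_size,
                             unsigned long bios_offset)
{
    /* plain RAM at guest physical address 0 */
    cpu_register_physical_memory(0, ram_size, 0);
    /* 256KB BIOS ROM just below 4GB, backed by RAM at 'bios_offset' */
    cpu_register_physical_memory(0xfffc0000, 0x40000,
                                 bios_offset | IO_MEM_ROM);
}
#endif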
1765
1766 static uint32_t unassigned_mem_readb(target_phys_addr_t addr)
1767 {
1768 return 0;
1769 }
1770
1771 static void unassigned_mem_writeb(target_phys_addr_t addr, uint32_t val)
1772 {
1773 }
1774
1775 static CPUReadMemoryFunc *unassigned_mem_read[3] = {
1776 unassigned_mem_readb,
1777 unassigned_mem_readb,
1778 unassigned_mem_readb,
1779 };
1780
1781 static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
1782 unassigned_mem_writeb,
1783 unassigned_mem_writeb,
1784 unassigned_mem_writeb,
1785 };
1786
1787 /* self modifying code support in soft mmu mode: writes to a page
1788    containing code are routed to these functions */
1789
1790 static void code_mem_writeb(target_phys_addr_t addr, uint32_t val)
1791 {
1792 unsigned long phys_addr;
1793
1794 phys_addr = addr - (unsigned long)phys_ram_base;
1795 #if !defined(CONFIG_USER_ONLY)
1796 tb_invalidate_phys_page_fast(phys_addr, 1);
1797 #endif
1798 stb_raw((uint8_t *)addr, val);
1799 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1800 }
1801
1802 static void code_mem_writew(target_phys_addr_t addr, uint32_t val)
1803 {
1804 unsigned long phys_addr;
1805
1806 phys_addr = addr - (unsigned long)phys_ram_base;
1807 #if !defined(CONFIG_USER_ONLY)
1808 tb_invalidate_phys_page_fast(phys_addr, 2);
1809 #endif
1810 stw_raw((uint8_t *)addr, val);
1811 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1812 }
1813
1814 static void code_mem_writel(target_phys_addr_t addr, uint32_t val)
1815 {
1816 unsigned long phys_addr;
1817
1818 phys_addr = addr - (unsigned long)phys_ram_base;
1819 #if !defined(CONFIG_USER_ONLY)
1820 tb_invalidate_phys_page_fast(phys_addr, 4);
1821 #endif
1822 stl_raw((uint8_t *)addr, val);
1823 phys_ram_dirty[phys_addr >> TARGET_PAGE_BITS] = 1;
1824 }
1825
1826 static CPUReadMemoryFunc *code_mem_read[3] = {
1827 NULL, /* never used */
1828 NULL, /* never used */
1829 NULL, /* never used */
1830 };
1831
1832 static CPUWriteMemoryFunc *code_mem_write[3] = {
1833 code_mem_writeb,
1834 code_mem_writew,
1835 code_mem_writel,
1836 };
1837
1838 static void notdirty_mem_writeb(target_phys_addr_t addr, uint32_t val)
1839 {
1840 stb_raw((uint8_t *)addr, val);
1841 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1842 }
1843
1844 static void notdirty_mem_writew(target_phys_addr_t addr, uint32_t val)
1845 {
1846 stw_raw((uint8_t *)addr, val);
1847 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1848 }
1849
1850 static void notdirty_mem_writel(target_phys_addr_t addr, uint32_t val)
1851 {
1852 stl_raw((uint8_t *)addr, val);
1853 tlb_set_dirty(addr, cpu_single_env->mem_write_vaddr);
1854 }
1855
1856 static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
1857 notdirty_mem_writeb,
1858 notdirty_mem_writew,
1859 notdirty_mem_writel,
1860 };
1861
1862 static void io_mem_init(void)
1863 {
1864 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, code_mem_read, unassigned_mem_write);
1865 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write);
1866 cpu_register_io_memory(IO_MEM_CODE >> IO_MEM_SHIFT, code_mem_read, code_mem_write);
1867 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, code_mem_read, notdirty_mem_write);
1868 io_mem_nb = 5;
1869
1870 /* alloc dirty bits array */
1871 phys_ram_dirty = qemu_malloc(phys_ram_size >> TARGET_PAGE_BITS);
1872 }
1873
1874 /* mem_read and mem_write are arrays of functions containing the
1875 function to access byte (index 0), word (index 1) and dword (index
1876 2). All functions must be supplied. If io_index is non zero, the
1877 corresponding io zone is modified. If it is zero, a new io zone is
1878 allocated. The return value can be used with
1879    cpu_register_physical_memory(). (-1) is returned on error. */
1880 int cpu_register_io_memory(int io_index,
1881 CPUReadMemoryFunc **mem_read,
1882 CPUWriteMemoryFunc **mem_write)
1883 {
1884 int i;
1885
1886 if (io_index <= 0) {
1887 if (io_index >= IO_MEM_NB_ENTRIES)
1888 return -1;
1889 io_index = io_mem_nb++;
1890 } else {
1891 if (io_index >= IO_MEM_NB_ENTRIES)
1892 return -1;
1893 }
1894
1895 for(i = 0;i < 3; i++) {
1896 io_mem_read[io_index][i] = mem_read[i];
1897 io_mem_write[io_index][i] = mem_write[i];
1898 }
1899 return io_index << IO_MEM_SHIFT;
1900 }
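
/* Usage sketch (not part of the original source; the mydev_* callbacks
   are assumptions): a device model allocates a new I/O memory slot by
   passing io_index == 0 and then maps it at a guest physical address. */
#if 0
static CPUReadMemoryFunc *mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc *mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void mydev_map(target_phys_addr_t base)
{
    int iomemtype;

    iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomemtype);
}
#endif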
1901
1902 /* physical memory access (slow version, mainly for debug) */
1903 #if defined(CONFIG_USER_ONLY)
1904 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1905 int len, int is_write)
1906 {
1907 int l, flags;
1908 target_ulong page;
1909
1910 while (len > 0) {
1911 page = addr & TARGET_PAGE_MASK;
1912 l = (page + TARGET_PAGE_SIZE) - addr;
1913 if (l > len)
1914 l = len;
1915 flags = page_get_flags(page);
1916 if (!(flags & PAGE_VALID))
1917 return;
1918 if (is_write) {
1919 if (!(flags & PAGE_WRITE))
1920 return;
1921             memcpy((uint8_t *)addr, buf, l);
1922 } else {
1923 if (!(flags & PAGE_READ))
1924 return;
1925             memcpy(buf, (uint8_t *)addr, l);
1926 }
1927 len -= l;
1928 buf += l;
1929 addr += l;
1930 }
1931 }
1932 #else
1933 void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
1934 int len, int is_write)
1935 {
1936 int l, io_index;
1937 uint8_t *ptr;
1938 uint32_t val;
1939 target_phys_addr_t page;
1940 unsigned long pd;
1941 PageDesc *p;
1942
1943 while (len > 0) {
1944 page = addr & TARGET_PAGE_MASK;
1945 l = (page + TARGET_PAGE_SIZE) - addr;
1946 if (l > len)
1947 l = len;
1948 p = page_find(page >> TARGET_PAGE_BITS);
1949 if (!p) {
1950 pd = IO_MEM_UNASSIGNED;
1951 } else {
1952 pd = p->phys_offset;
1953 }
1954
1955 if (is_write) {
1956 if ((pd & ~TARGET_PAGE_MASK) != 0) {
1957 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1958 if (l >= 4 && ((addr & 3) == 0)) {
1959                     /* 32 bit write access */
1960 val = ldl_raw(buf);
1961 io_mem_write[io_index][2](addr, val);
1962 l = 4;
1963 } else if (l >= 2 && ((addr & 1) == 0)) {
1964                     /* 16 bit write access */
1965 val = lduw_raw(buf);
1966 io_mem_write[io_index][1](addr, val);
1967 l = 2;
1968 } else {
1969 /* 8 bit access */
1970 val = ldub_raw(buf);
1971 io_mem_write[io_index][0](addr, val);
1972 l = 1;
1973 }
1974 } else {
1975 unsigned long addr1;
1976 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
1977 /* RAM case */
1978 ptr = phys_ram_base + addr1;
1979 memcpy(ptr, buf, l);
1980 /* invalidate code */
1981 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
1982 /* set dirty bit */
1983 phys_ram_dirty[page >> TARGET_PAGE_BITS] = 1;
1984 }
1985 } else {
1986 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
1987 (pd & ~TARGET_PAGE_MASK) != IO_MEM_CODE) {
1988 /* I/O case */
1989 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
1990 if (l >= 4 && ((addr & 3) == 0)) {
1991 /* 32 bit read access */
1992 val = io_mem_read[io_index][2](addr);
1993 stl_raw(buf, val);
1994 l = 4;
1995 } else if (l >= 2 && ((addr & 1) == 0)) {
1996 /* 16 bit read access */
1997 val = io_mem_read[io_index][1](addr);
1998 stw_raw(buf, val);
1999 l = 2;
2000 } else {
2001 /* 8 bit access */
2002 val = io_mem_read[io_index][0](addr);
2003 stb_raw(buf, val);
2004 l = 1;
2005 }
2006 } else {
2007 /* RAM case */
2008 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2009 (addr & ~TARGET_PAGE_MASK);
2010 memcpy(buf, ptr, l);
2011 }
2012 }
2013 len -= l;
2014 buf += l;
2015 addr += l;
2016 }
2017 }
2018 #endif
2019
2020 /* virtual memory access for debug */
2021 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2022 uint8_t *buf, int len, int is_write)
2023 {
2024 int l;
2025 target_ulong page, phys_addr;
2026
2027 while (len > 0) {
2028 page = addr & TARGET_PAGE_MASK;
2029 phys_addr = cpu_get_phys_page_debug(env, page);
2030 /* if no physical page mapped, return an error */
2031 if (phys_addr == -1)
2032 return -1;
2033 l = (page + TARGET_PAGE_SIZE) - addr;
2034 if (l > len)
2035 l = len;
2036 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2037 buf, l, is_write);
2038 len -= l;
2039 buf += l;
2040 addr += l;
2041 }
2042 return 0;
2043 }
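
/* Usage sketch (not part of the original source; the helper name is an
   assumption): a debugger stub reads guest virtual memory through the
   current page tables; a negative return value means the page is not
   mapped. */
#if 0
static int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *val)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;
    *val = ldl_raw(buf);
    return 0;
}
#endif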
2044
2045 #if !defined(CONFIG_USER_ONLY)
2046
2047 #define MMUSUFFIX _cmmu
2048 #define GETPC() NULL
2049 #define env cpu_single_env
2050
2051 #define SHIFT 0
2052 #include "softmmu_template.h"
2053
2054 #define SHIFT 1
2055 #include "softmmu_template.h"
2056
2057 #define SHIFT 2
2058 #include "softmmu_template.h"
2059
2060 #define SHIFT 3
2061 #include "softmmu_template.h"
2062
2063 #undef env
2064
2065 #endif