2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
36 #include "qemu-common.h"
41 #if defined(CONFIG_USER_ONLY)
46 //#define DEBUG_TB_INVALIDATE
49 //#define DEBUG_UNASSIGNED
51 /* make various TB consistency checks */
52 //#define DEBUG_TB_CHECK
53 //#define DEBUG_TLB_CHECK
55 //#define DEBUG_IOPORT
56 //#define DEBUG_SUBPAGE
58 #if !defined(CONFIG_USER_ONLY)
59 /* TB consistency checks only implemented for usermode emulation. */
63 #define SMC_BITMAP_USE_THRESHOLD 10
65 static TranslationBlock
*tbs
;
66 int code_gen_max_blocks
;
67 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
69 /* any access to the tbs or the page table must use this lock */
70 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
72 #if defined(__arm__) || defined(__sparc_v9__)
73 /* The prologue must be reachable with a direct jump. ARM and Sparc64
74 have limited branch ranges (possibly also PPC) so place it in a
75 section close to code segment. */
76 #define code_gen_section \
77 __attribute__((__section__(".gen_code"))) \
78 __attribute__((aligned (32)))
80 /* Maximum alignment for Win32 is 16. */
81 #define code_gen_section \
82 __attribute__((aligned (16)))
84 #define code_gen_section \
85 __attribute__((aligned (32)))
88 uint8_t code_gen_prologue
[1024] code_gen_section
;
89 static uint8_t *code_gen_buffer
;
90 static unsigned long code_gen_buffer_size
;
91 /* threshold to flush the translated code buffer */
92 static unsigned long code_gen_buffer_max_size
;
93 uint8_t *code_gen_ptr
;
95 #if !defined(CONFIG_USER_ONLY)
97 uint8_t *phys_ram_dirty
;
98 static int in_migration
;
100 typedef struct RAMBlock
{
104 struct RAMBlock
*next
;
107 static RAMBlock
*ram_blocks
;
108 /* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
109 then we can no longer assume contiguous ram offsets, and external uses
110 of this variable will break. */
111 ram_addr_t last_ram_offset
;
115 /* current CPU in the current thread. It is only valid inside
117 CPUState
*cpu_single_env
;
118 /* 0 = Do not count executed instructions.
119 1 = Precise instruction counting.
120 2 = Adaptive rate instruction counting. */
122 /* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
126 typedef struct PageDesc
{
127 /* list of TBs intersecting this ram page */
128 TranslationBlock
*first_tb
;
129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count
;
132 uint8_t *code_bitmap
;
133 #if defined(CONFIG_USER_ONLY)
138 /* In system mode we want L1_MAP to be based on ram offsets,
139 while in user mode we want it to be based on virtual addresses. */
140 #if !defined(CONFIG_USER_ONLY)
141 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
144 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
147 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
150 /* Size of the L2 (and L3, etc) page tables. */
152 #define L2_SIZE (1 << L2_BITS)
154 /* The bits remaining after N lower levels of page tables. */
155 #define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157 #define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160 /* Size of the L1 page table. Avoid silly small sizes. */
161 #if P_L1_BITS_REM < 4
162 #define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
164 #define P_L1_BITS P_L1_BITS_REM
167 #if V_L1_BITS_REM < 4
168 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
170 #define V_L1_BITS V_L1_BITS_REM
173 #define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176 #define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179 unsigned long qemu_real_host_page_size
;
180 unsigned long qemu_host_page_bits
;
181 unsigned long qemu_host_page_size
;
182 unsigned long qemu_host_page_mask
;
184 /* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186 static void *l1_map
[V_L1_SIZE
];
188 #if !defined(CONFIG_USER_ONLY)
189 typedef struct PhysPageDesc
{
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset
;
192 ram_addr_t region_offset
;
195 /* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197 static void *l1_phys_map
[P_L1_SIZE
];
199 static void io_mem_init(void);
201 /* io memory support */
202 CPUWriteMemoryFunc
*io_mem_write
[IO_MEM_NB_ENTRIES
][4];
203 CPUReadMemoryFunc
*io_mem_read
[IO_MEM_NB_ENTRIES
][4];
204 void *io_mem_opaque
[IO_MEM_NB_ENTRIES
];
205 static char io_mem_used
[IO_MEM_NB_ENTRIES
];
206 static int io_mem_watch
;
211 static const char *logfilename
= "qemu.log";
213 static const char *logfilename
= "/tmp/qemu.log";
217 static int log_append
= 0;
220 static int tlb_flush_count
;
221 static int tb_flush_count
;
222 static int tb_phys_invalidate_count
;
225 static void map_exec(void *addr
, long size
)
228 VirtualProtect(addr
, size
,
229 PAGE_EXECUTE_READWRITE
, &old_protect
);
233 static void map_exec(void *addr
, long size
)
235 unsigned long start
, end
, page_size
;
237 page_size
= getpagesize();
238 start
= (unsigned long)addr
;
239 start
&= ~(page_size
- 1);
241 end
= (unsigned long)addr
+ size
;
242 end
+= page_size
- 1;
243 end
&= ~(page_size
- 1);
245 mprotect((void *)start
, end
- start
,
246 PROT_READ
| PROT_WRITE
| PROT_EXEC
);
250 static void page_init(void)
252 /* NOTE: we can always suppose that qemu_host_page_size >=
256 SYSTEM_INFO system_info
;
258 GetSystemInfo(&system_info
);
259 qemu_real_host_page_size
= system_info
.dwPageSize
;
262 qemu_real_host_page_size
= getpagesize();
264 if (qemu_host_page_size
== 0)
265 qemu_host_page_size
= qemu_real_host_page_size
;
266 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
267 qemu_host_page_size
= TARGET_PAGE_SIZE
;
268 qemu_host_page_bits
= 0;
269 while ((1 << qemu_host_page_bits
) < qemu_host_page_size
)
270 qemu_host_page_bits
++;
271 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
273 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
277 last_brk
= (unsigned long)sbrk(0);
279 f
= fopen("/proc/self/maps", "r");
284 unsigned long startaddr
, endaddr
;
287 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
289 if (n
== 2 && h2g_valid(startaddr
)) {
290 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
292 if (h2g_valid(endaddr
)) {
293 endaddr
= h2g(endaddr
);
297 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
308 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
314 #if defined(CONFIG_USER_ONLY)
315 /* We can't use qemu_malloc because it may recurse into a locked mutex.
316 Neither can we record the new pages we reserve while allocating a
317 given page because that may recurse into an unallocated page table
318 entry. Stuff the allocations we do make into a queue and process
319 them after having completed one entire page table allocation. */
321 unsigned long reserve
[2 * (V_L1_SHIFT
/ L2_BITS
)];
324 # define ALLOC(P, SIZE) \
326 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
327 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
328 if (h2g_valid(P)) { \
329 reserve[reserve_idx] = h2g(P); \
330 reserve[reserve_idx + 1] = SIZE; \
335 # define ALLOC(P, SIZE) \
336 do { P = qemu_mallocz(SIZE); } while (0)
339 /* Level 1. Always allocated. */
340 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
343 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
350 ALLOC(p
, sizeof(void *) * L2_SIZE
);
354 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
362 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
367 #if defined(CONFIG_USER_ONLY)
368 for (i
= 0; i
< reserve_idx
; i
+= 2) {
369 unsigned long addr
= reserve
[i
];
370 unsigned long len
= reserve
[i
+ 1];
372 page_set_flags(addr
& TARGET_PAGE_MASK
,
373 TARGET_PAGE_ALIGN(addr
+ len
),
378 return pd
+ (index
& (L2_SIZE
- 1));
381 static inline PageDesc
*page_find(tb_page_addr_t index
)
383 return page_find_alloc(index
, 0);
386 #if !defined(CONFIG_USER_ONLY)
387 static PhysPageDesc
*phys_page_find_alloc(target_phys_addr_t index
, int alloc
)
393 /* Level 1. Always allocated. */
394 lp
= l1_phys_map
+ ((index
>> P_L1_SHIFT
) & (P_L1_SIZE
- 1));
397 for (i
= P_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
403 *lp
= p
= qemu_mallocz(sizeof(void *) * L2_SIZE
);
405 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
416 *lp
= pd
= qemu_malloc(sizeof(PhysPageDesc
) * L2_SIZE
);
418 for (i
= 0; i
< L2_SIZE
; i
++) {
419 pd
[i
].phys_offset
= IO_MEM_UNASSIGNED
;
420 pd
[i
].region_offset
= (index
+ i
) << TARGET_PAGE_BITS
;
424 return pd
+ (index
& (L2_SIZE
- 1));
427 static inline PhysPageDesc
*phys_page_find(target_phys_addr_t index
)
429 return phys_page_find_alloc(index
, 0);
432 static void tlb_protect_code(ram_addr_t ram_addr
);
433 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
435 #define mmap_lock() do { } while(0)
436 #define mmap_unlock() do { } while(0)
439 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
441 #if defined(CONFIG_USER_ONLY)
442 /* Currently it is not recommended to allocate big chunks of data in
443 user mode. It will change when a dedicated libc will be used */
444 #define USE_STATIC_CODE_GEN_BUFFER
447 #ifdef USE_STATIC_CODE_GEN_BUFFER
448 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
];
451 static void code_gen_alloc(unsigned long tb_size
)
453 #ifdef USE_STATIC_CODE_GEN_BUFFER
454 code_gen_buffer
= static_code_gen_buffer
;
455 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
456 map_exec(code_gen_buffer
, code_gen_buffer_size
);
458 code_gen_buffer_size
= tb_size
;
459 if (code_gen_buffer_size
== 0) {
460 #if defined(CONFIG_USER_ONLY)
461 /* in user mode, phys_ram_size is not meaningful */
462 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
464 /* XXX: needs adjustments */
465 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
468 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
469 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
470 /* The code gen buffer location may have constraints depending on
471 the host cpu and OS */
472 #if defined(__linux__)
477 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
478 #if defined(__x86_64__)
480 /* Cannot map more than that */
481 if (code_gen_buffer_size
> (800 * 1024 * 1024))
482 code_gen_buffer_size
= (800 * 1024 * 1024);
483 #elif defined(__sparc_v9__)
484 // Map the buffer below 2G, so we can use direct calls and branches
486 start
= (void *) 0x60000000UL
;
487 if (code_gen_buffer_size
> (512 * 1024 * 1024))
488 code_gen_buffer_size
= (512 * 1024 * 1024);
489 #elif defined(__arm__)
490 /* Map the buffer below 32M, so we can use direct calls and branches */
492 start
= (void *) 0x01000000UL
;
493 if (code_gen_buffer_size
> 16 * 1024 * 1024)
494 code_gen_buffer_size
= 16 * 1024 * 1024;
496 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
497 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
499 if (code_gen_buffer
== MAP_FAILED
) {
500 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
504 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
508 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
509 #if defined(__x86_64__)
510 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
511 * 0x40000000 is free */
513 addr
= (void *)0x40000000;
514 /* Cannot map more than that */
515 if (code_gen_buffer_size
> (800 * 1024 * 1024))
516 code_gen_buffer_size
= (800 * 1024 * 1024);
518 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
519 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
521 if (code_gen_buffer
== MAP_FAILED
) {
522 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
527 code_gen_buffer
= qemu_malloc(code_gen_buffer_size
);
528 map_exec(code_gen_buffer
, code_gen_buffer_size
);
530 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
531 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
532 code_gen_buffer_max_size
= code_gen_buffer_size
-
533 code_gen_max_block_size();
534 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
535 tbs
= qemu_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
538 /* Must be called before using the QEMU cpus. 'tb_size' is the size
539 (in bytes) allocated to the translation buffer. Zero means default
541 void cpu_exec_init_all(unsigned long tb_size
)
544 code_gen_alloc(tb_size
);
545 code_gen_ptr
= code_gen_buffer
;
547 #if !defined(CONFIG_USER_ONLY)
552 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
554 static int cpu_common_post_load(void *opaque
, int version_id
)
556 CPUState
*env
= opaque
;
558 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
559 version_id is increased. */
560 env
->interrupt_request
&= ~0x01;
566 static const VMStateDescription vmstate_cpu_common
= {
567 .name
= "cpu_common",
569 .minimum_version_id
= 1,
570 .minimum_version_id_old
= 1,
571 .post_load
= cpu_common_post_load
,
572 .fields
= (VMStateField
[]) {
573 VMSTATE_UINT32(halted
, CPUState
),
574 VMSTATE_UINT32(interrupt_request
, CPUState
),
575 VMSTATE_END_OF_LIST()
580 CPUState
*qemu_get_cpu(int cpu
)
582 CPUState
*env
= first_cpu
;
585 if (env
->cpu_index
== cpu
)
593 void cpu_exec_init(CPUState
*env
)
598 #if defined(CONFIG_USER_ONLY)
601 env
->next_cpu
= NULL
;
604 while (*penv
!= NULL
) {
605 penv
= &(*penv
)->next_cpu
;
608 env
->cpu_index
= cpu_index
;
610 QTAILQ_INIT(&env
->breakpoints
);
611 QTAILQ_INIT(&env
->watchpoints
);
613 #if defined(CONFIG_USER_ONLY)
616 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
617 vmstate_register(cpu_index
, &vmstate_cpu_common
, env
);
618 register_savevm("cpu", cpu_index
, CPU_SAVE_VERSION
,
619 cpu_save
, cpu_load
, env
);
623 static inline void invalidate_page_bitmap(PageDesc
*p
)
625 if (p
->code_bitmap
) {
626 qemu_free(p
->code_bitmap
);
627 p
->code_bitmap
= NULL
;
629 p
->code_write_count
= 0;
632 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
634 static void page_flush_tb_1 (int level
, void **lp
)
643 for (i
= 0; i
< L2_BITS
; ++i
) {
644 pd
[i
].first_tb
= NULL
;
645 invalidate_page_bitmap(pd
+ i
);
649 for (i
= 0; i
< L2_BITS
; ++i
) {
650 page_flush_tb_1 (level
- 1, pp
+ i
);
655 static void page_flush_tb(void)
658 for (i
= 0; i
< V_L1_SIZE
; i
++) {
659 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
663 /* flush all the translation blocks */
664 /* XXX: tb_flush is currently not thread safe */
665 void tb_flush(CPUState
*env1
)
668 #if defined(DEBUG_FLUSH)
669 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
670 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
672 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
674 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
675 cpu_abort(env1
, "Internal error: code buffer overflow\n");
679 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
680 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
683 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
686 code_gen_ptr
= code_gen_buffer
;
687 /* XXX: flush processor icache at this point if cache flush is
692 #ifdef DEBUG_TB_CHECK
694 static void tb_invalidate_check(target_ulong address
)
696 TranslationBlock
*tb
;
698 address
&= TARGET_PAGE_MASK
;
699 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
700 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
701 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
702 address
>= tb
->pc
+ tb
->size
)) {
703 printf("ERROR invalidate: address=" TARGET_FMT_lx
704 " PC=%08lx size=%04x\n",
705 address
, (long)tb
->pc
, tb
->size
);
711 /* verify that all the pages have correct rights for code */
712 static void tb_page_check(void)
714 TranslationBlock
*tb
;
715 int i
, flags1
, flags2
;
717 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
718 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
719 flags1
= page_get_flags(tb
->pc
);
720 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
721 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
722 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
723 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
731 /* invalidate one TB */
732 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
735 TranslationBlock
*tb1
;
739 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
742 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
746 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
748 TranslationBlock
*tb1
;
754 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
756 *ptb
= tb1
->page_next
[n1
];
759 ptb
= &tb1
->page_next
[n1
];
763 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
765 TranslationBlock
*tb1
, **ptb
;
768 ptb
= &tb
->jmp_next
[n
];
771 /* find tb(n) in circular list */
775 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
776 if (n1
== n
&& tb1
== tb
)
779 ptb
= &tb1
->jmp_first
;
781 ptb
= &tb1
->jmp_next
[n1
];
784 /* now we can suppress tb(n) from the list */
785 *ptb
= tb
->jmp_next
[n
];
787 tb
->jmp_next
[n
] = NULL
;
791 /* reset the jump entry 'n' of a TB so that it is not chained to
793 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
795 tb_set_jmp_target(tb
, n
, (unsigned long)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
798 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
803 tb_page_addr_t phys_pc
;
804 TranslationBlock
*tb1
, *tb2
;
806 /* remove the TB from the hash list */
807 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
808 h
= tb_phys_hash_func(phys_pc
);
809 tb_remove(&tb_phys_hash
[h
], tb
,
810 offsetof(TranslationBlock
, phys_hash_next
));
812 /* remove the TB from the page list */
813 if (tb
->page_addr
[0] != page_addr
) {
814 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
815 tb_page_remove(&p
->first_tb
, tb
);
816 invalidate_page_bitmap(p
);
818 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
819 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
820 tb_page_remove(&p
->first_tb
, tb
);
821 invalidate_page_bitmap(p
);
824 tb_invalidated_flag
= 1;
826 /* remove the TB from the hash list */
827 h
= tb_jmp_cache_hash_func(tb
->pc
);
828 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
829 if (env
->tb_jmp_cache
[h
] == tb
)
830 env
->tb_jmp_cache
[h
] = NULL
;
833 /* suppress this TB from the two jump lists */
834 tb_jmp_remove(tb
, 0);
835 tb_jmp_remove(tb
, 1);
837 /* suppress any remaining jumps to this TB */
843 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
844 tb2
= tb1
->jmp_next
[n1
];
845 tb_reset_jump(tb1
, n1
);
846 tb1
->jmp_next
[n1
] = NULL
;
849 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2); /* fail safe */
851 tb_phys_invalidate_count
++;
854 static inline void set_bits(uint8_t *tab
, int start
, int len
)
860 mask
= 0xff << (start
& 7);
861 if ((start
& ~7) == (end
& ~7)) {
863 mask
&= ~(0xff << (end
& 7));
868 start
= (start
+ 8) & ~7;
870 while (start
< end1
) {
875 mask
= ~(0xff << (end
& 7));
881 static void build_page_bitmap(PageDesc
*p
)
883 int n
, tb_start
, tb_end
;
884 TranslationBlock
*tb
;
886 p
->code_bitmap
= qemu_mallocz(TARGET_PAGE_SIZE
/ 8);
891 tb
= (TranslationBlock
*)((long)tb
& ~3);
892 /* NOTE: this is subtle as a TB may span two physical pages */
894 /* NOTE: tb_end may be after the end of the page, but
895 it is not a problem */
896 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
897 tb_end
= tb_start
+ tb
->size
;
898 if (tb_end
> TARGET_PAGE_SIZE
)
899 tb_end
= TARGET_PAGE_SIZE
;
902 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
904 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
905 tb
= tb
->page_next
[n
];
909 TranslationBlock
*tb_gen_code(CPUState
*env
,
910 target_ulong pc
, target_ulong cs_base
,
911 int flags
, int cflags
)
913 TranslationBlock
*tb
;
915 tb_page_addr_t phys_pc
, phys_page2
;
916 target_ulong virt_page2
;
919 phys_pc
= get_page_addr_code(env
, pc
);
922 /* flush must be done */
924 /* cannot fail at this point */
926 /* Don't forget to invalidate previous TB info. */
927 tb_invalidated_flag
= 1;
929 tc_ptr
= code_gen_ptr
;
931 tb
->cs_base
= cs_base
;
934 cpu_gen_code(env
, tb
, &code_gen_size
);
935 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
937 /* check next page if needed */
938 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
940 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
941 phys_page2
= get_page_addr_code(env
, virt_page2
);
943 tb_link_page(tb
, phys_pc
, phys_page2
);
947 /* invalidate all TBs which intersect with the target physical page
948 starting in range [start;end[. NOTE: start and end must refer to
949 the same physical page. 'is_cpu_write_access' should be true if called
950 from a real cpu write access: the virtual CPU will exit the current
951 TB if code is modified inside this TB. */
952 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
953 int is_cpu_write_access
)
955 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
956 CPUState
*env
= cpu_single_env
;
957 tb_page_addr_t tb_start
, tb_end
;
960 #ifdef TARGET_HAS_PRECISE_SMC
961 int current_tb_not_found
= is_cpu_write_access
;
962 TranslationBlock
*current_tb
= NULL
;
963 int current_tb_modified
= 0;
964 target_ulong current_pc
= 0;
965 target_ulong current_cs_base
= 0;
966 int current_flags
= 0;
967 #endif /* TARGET_HAS_PRECISE_SMC */
969 p
= page_find(start
>> TARGET_PAGE_BITS
);
972 if (!p
->code_bitmap
&&
973 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
974 is_cpu_write_access
) {
975 /* build code bitmap */
976 build_page_bitmap(p
);
979 /* we remove all the TBs in the range [start, end[ */
980 /* XXX: see if in some cases it could be faster to invalidate all the code */
984 tb
= (TranslationBlock
*)((long)tb
& ~3);
985 tb_next
= tb
->page_next
[n
];
986 /* NOTE: this is subtle as a TB may span two physical pages */
988 /* NOTE: tb_end may be after the end of the page, but
989 it is not a problem */
990 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
991 tb_end
= tb_start
+ tb
->size
;
993 tb_start
= tb
->page_addr
[1];
994 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
996 if (!(tb_end
<= start
|| tb_start
>= end
)) {
997 #ifdef TARGET_HAS_PRECISE_SMC
998 if (current_tb_not_found
) {
999 current_tb_not_found
= 0;
1001 if (env
->mem_io_pc
) {
1002 /* now we have a real cpu fault */
1003 current_tb
= tb_find_pc(env
->mem_io_pc
);
1006 if (current_tb
== tb
&&
1007 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1008 /* If we are modifying the current TB, we must stop
1009 its execution. We could be more precise by checking
1010 that the modification is after the current PC, but it
1011 would require a specialized function to partially
1012 restore the CPU state */
1014 current_tb_modified
= 1;
1015 cpu_restore_state(current_tb
, env
,
1016 env
->mem_io_pc
, NULL
);
1017 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1020 #endif /* TARGET_HAS_PRECISE_SMC */
1021 /* we need to do that to handle the case where a signal
1022 occurs while doing tb_phys_invalidate() */
1025 saved_tb
= env
->current_tb
;
1026 env
->current_tb
= NULL
;
1028 tb_phys_invalidate(tb
, -1);
1030 env
->current_tb
= saved_tb
;
1031 if (env
->interrupt_request
&& env
->current_tb
)
1032 cpu_interrupt(env
, env
->interrupt_request
);
1037 #if !defined(CONFIG_USER_ONLY)
1038 /* if no code remaining, no need to continue to use slow writes */
1040 invalidate_page_bitmap(p
);
1041 if (is_cpu_write_access
) {
1042 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1046 #ifdef TARGET_HAS_PRECISE_SMC
1047 if (current_tb_modified
) {
1048 /* we generate a block containing just the instruction
1049 modifying the memory. It will ensure that it cannot modify
1051 env
->current_tb
= NULL
;
1052 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1053 cpu_resume_from_signal(env
, NULL
);
1058 /* len must be <= 8 and start must be a multiple of len */
1059 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1065 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1066 cpu_single_env
->mem_io_vaddr
, len
,
1067 cpu_single_env
->eip
,
1068 cpu_single_env
->eip
+ (long)cpu_single_env
->segs
[R_CS
].base
);
1071 p
= page_find(start
>> TARGET_PAGE_BITS
);
1074 if (p
->code_bitmap
) {
1075 offset
= start
& ~TARGET_PAGE_MASK
;
1076 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1077 if (b
& ((1 << len
) - 1))
1081 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1085 #if !defined(CONFIG_SOFTMMU)
1086 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1087 unsigned long pc
, void *puc
)
1089 TranslationBlock
*tb
;
1092 #ifdef TARGET_HAS_PRECISE_SMC
1093 TranslationBlock
*current_tb
= NULL
;
1094 CPUState
*env
= cpu_single_env
;
1095 int current_tb_modified
= 0;
1096 target_ulong current_pc
= 0;
1097 target_ulong current_cs_base
= 0;
1098 int current_flags
= 0;
1101 addr
&= TARGET_PAGE_MASK
;
1102 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1106 #ifdef TARGET_HAS_PRECISE_SMC
1107 if (tb
&& pc
!= 0) {
1108 current_tb
= tb_find_pc(pc
);
1111 while (tb
!= NULL
) {
1113 tb
= (TranslationBlock
*)((long)tb
& ~3);
1114 #ifdef TARGET_HAS_PRECISE_SMC
1115 if (current_tb
== tb
&&
1116 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1117 /* If we are modifying the current TB, we must stop
1118 its execution. We could be more precise by checking
1119 that the modification is after the current PC, but it
1120 would require a specialized function to partially
1121 restore the CPU state */
1123 current_tb_modified
= 1;
1124 cpu_restore_state(current_tb
, env
, pc
, puc
);
1125 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1128 #endif /* TARGET_HAS_PRECISE_SMC */
1129 tb_phys_invalidate(tb
, addr
);
1130 tb
= tb
->page_next
[n
];
1133 #ifdef TARGET_HAS_PRECISE_SMC
1134 if (current_tb_modified
) {
1135 /* we generate a block containing just the instruction
1136 modifying the memory. It will ensure that it cannot modify
1138 env
->current_tb
= NULL
;
1139 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1140 cpu_resume_from_signal(env
, puc
);
1146 /* add the tb in the target page and protect it if necessary */
1147 static inline void tb_alloc_page(TranslationBlock
*tb
,
1148 unsigned int n
, tb_page_addr_t page_addr
)
1151 TranslationBlock
*last_first_tb
;
1153 tb
->page_addr
[n
] = page_addr
;
1154 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1155 tb
->page_next
[n
] = p
->first_tb
;
1156 last_first_tb
= p
->first_tb
;
1157 p
->first_tb
= (TranslationBlock
*)((long)tb
| n
);
1158 invalidate_page_bitmap(p
);
1160 #if defined(TARGET_HAS_SMC) || 1
1162 #if defined(CONFIG_USER_ONLY)
1163 if (p
->flags
& PAGE_WRITE
) {
1168 /* force the host page as non writable (writes will have a
1169 page fault + mprotect overhead) */
1170 page_addr
&= qemu_host_page_mask
;
1172 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1173 addr
+= TARGET_PAGE_SIZE
) {
1175 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1179 p2
->flags
&= ~PAGE_WRITE
;
1180 page_get_flags(addr
);
1182 mprotect(g2h(page_addr
), qemu_host_page_size
,
1183 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1184 #ifdef DEBUG_TB_INVALIDATE
1185 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1190 /* if some code is already present, then the pages are already
1191 protected. So we handle the case where only the first TB is
1192 allocated in a physical page */
1193 if (!last_first_tb
) {
1194 tlb_protect_code(page_addr
);
1198 #endif /* TARGET_HAS_SMC */
1201 /* Allocate a new translation block. Flush the translation buffer if
1202 too many translation blocks or too much generated code. */
1203 TranslationBlock
*tb_alloc(target_ulong pc
)
1205 TranslationBlock
*tb
;
1207 if (nb_tbs
>= code_gen_max_blocks
||
1208 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
1210 tb
= &tbs
[nb_tbs
++];
1216 void tb_free(TranslationBlock
*tb
)
1218 /* In practice this is mostly used for single use temporary TB
1219 Ignore the hard cases and just back up if this TB happens to
1220 be the last one generated. */
1221 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
1222 code_gen_ptr
= tb
->tc_ptr
;
1227 /* add a new TB and link it to the physical page tables. phys_page2 is
1228 (-1) to indicate that only one page contains the TB. */
1229 void tb_link_page(TranslationBlock
*tb
,
1230 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1233 TranslationBlock
**ptb
;
1235 /* Grab the mmap lock to stop another thread invalidating this TB
1236 before we are done. */
1238 /* add in the physical hash table */
1239 h
= tb_phys_hash_func(phys_pc
);
1240 ptb
= &tb_phys_hash
[h
];
1241 tb
->phys_hash_next
= *ptb
;
1244 /* add in the page list */
1245 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1246 if (phys_page2
!= -1)
1247 tb_alloc_page(tb
, 1, phys_page2
);
1249 tb
->page_addr
[1] = -1;
1251 tb
->jmp_first
= (TranslationBlock
*)((long)tb
| 2);
1252 tb
->jmp_next
[0] = NULL
;
1253 tb
->jmp_next
[1] = NULL
;
1255 /* init original jump addresses */
1256 if (tb
->tb_next_offset
[0] != 0xffff)
1257 tb_reset_jump(tb
, 0);
1258 if (tb
->tb_next_offset
[1] != 0xffff)
1259 tb_reset_jump(tb
, 1);
1261 #ifdef DEBUG_TB_CHECK
1267 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1268 tb[1].tc_ptr. Return NULL if not found */
1269 TranslationBlock
*tb_find_pc(unsigned long tc_ptr
)
1271 int m_min
, m_max
, m
;
1273 TranslationBlock
*tb
;
1277 if (tc_ptr
< (unsigned long)code_gen_buffer
||
1278 tc_ptr
>= (unsigned long)code_gen_ptr
)
1280 /* binary search (cf Knuth) */
1283 while (m_min
<= m_max
) {
1284 m
= (m_min
+ m_max
) >> 1;
1286 v
= (unsigned long)tb
->tc_ptr
;
1289 else if (tc_ptr
< v
) {
1298 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1300 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1302 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1305 tb1
= tb
->jmp_next
[n
];
1307 /* find head of list */
1310 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1313 tb1
= tb1
->jmp_next
[n1
];
1315 /* we are now sure now that tb jumps to tb1 */
1318 /* remove tb from the jmp_first list */
1319 ptb
= &tb_next
->jmp_first
;
1323 tb1
= (TranslationBlock
*)((long)tb1
& ~3);
1324 if (n1
== n
&& tb1
== tb
)
1326 ptb
= &tb1
->jmp_next
[n1
];
1328 *ptb
= tb
->jmp_next
[n
];
1329 tb
->jmp_next
[n
] = NULL
;
1331 /* suppress the jump to next tb in generated code */
1332 tb_reset_jump(tb
, n
);
1334 /* suppress jumps in the tb on which we could have jumped */
1335 tb_reset_jump_recursive(tb_next
);
1339 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1341 tb_reset_jump_recursive2(tb
, 0);
1342 tb_reset_jump_recursive2(tb
, 1);
1345 #if defined(TARGET_HAS_ICE)
1346 #if defined(CONFIG_USER_ONLY)
1347 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1349 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1352 static void breakpoint_invalidate(CPUState
*env
, target_ulong pc
)
1354 target_phys_addr_t addr
;
1356 ram_addr_t ram_addr
;
1359 addr
= cpu_get_phys_page_debug(env
, pc
);
1360 p
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1362 pd
= IO_MEM_UNASSIGNED
;
1364 pd
= p
->phys_offset
;
1366 ram_addr
= (pd
& TARGET_PAGE_MASK
) | (pc
& ~TARGET_PAGE_MASK
);
1367 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1370 #endif /* TARGET_HAS_ICE */
1372 #if defined(CONFIG_USER_ONLY)
1373 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1378 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1379 int flags
, CPUWatchpoint
**watchpoint
)
1384 /* Add a watchpoint. */
1385 int cpu_watchpoint_insert(CPUState
*env
, target_ulong addr
, target_ulong len
,
1386 int flags
, CPUWatchpoint
**watchpoint
)
1388 target_ulong len_mask
= ~(len
- 1);
1391 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1392 if ((len
!= 1 && len
!= 2 && len
!= 4 && len
!= 8) || (addr
& ~len_mask
)) {
1393 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1394 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1397 wp
= qemu_malloc(sizeof(*wp
));
1400 wp
->len_mask
= len_mask
;
1403 /* keep all GDB-injected watchpoints in front */
1405 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1407 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1409 tlb_flush_page(env
, addr
);
1416 /* Remove a specific watchpoint. */
1417 int cpu_watchpoint_remove(CPUState
*env
, target_ulong addr
, target_ulong len
,
1420 target_ulong len_mask
= ~(len
- 1);
1423 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1424 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1425 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1426 cpu_watchpoint_remove_by_ref(env
, wp
);
1433 /* Remove a specific watchpoint by reference. */
1434 void cpu_watchpoint_remove_by_ref(CPUState
*env
, CPUWatchpoint
*watchpoint
)
1436 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1438 tlb_flush_page(env
, watchpoint
->vaddr
);
1440 qemu_free(watchpoint
);
1443 /* Remove all matching watchpoints. */
1444 void cpu_watchpoint_remove_all(CPUState
*env
, int mask
)
1446 CPUWatchpoint
*wp
, *next
;
1448 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1449 if (wp
->flags
& mask
)
1450 cpu_watchpoint_remove_by_ref(env
, wp
);
1455 /* Add a breakpoint. */
1456 int cpu_breakpoint_insert(CPUState
*env
, target_ulong pc
, int flags
,
1457 CPUBreakpoint
**breakpoint
)
1459 #if defined(TARGET_HAS_ICE)
1462 bp
= qemu_malloc(sizeof(*bp
));
1467 /* keep all GDB-injected breakpoints in front */
1469 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1471 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1473 breakpoint_invalidate(env
, pc
);
1483 /* Remove a specific breakpoint. */
1484 int cpu_breakpoint_remove(CPUState
*env
, target_ulong pc
, int flags
)
1486 #if defined(TARGET_HAS_ICE)
1489 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1490 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1491 cpu_breakpoint_remove_by_ref(env
, bp
);
1501 /* Remove a specific breakpoint by reference. */
1502 void cpu_breakpoint_remove_by_ref(CPUState
*env
, CPUBreakpoint
*breakpoint
)
1504 #if defined(TARGET_HAS_ICE)
1505 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1507 breakpoint_invalidate(env
, breakpoint
->pc
);
1509 qemu_free(breakpoint
);
1513 /* Remove all matching breakpoints. */
1514 void cpu_breakpoint_remove_all(CPUState
*env
, int mask
)
1516 #if defined(TARGET_HAS_ICE)
1517 CPUBreakpoint
*bp
, *next
;
1519 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1520 if (bp
->flags
& mask
)
1521 cpu_breakpoint_remove_by_ref(env
, bp
);
1526 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1527 CPU loop after each instruction */
1528 void cpu_single_step(CPUState
*env
, int enabled
)
1530 #if defined(TARGET_HAS_ICE)
1531 if (env
->singlestep_enabled
!= enabled
) {
1532 env
->singlestep_enabled
= enabled
;
1534 kvm_update_guest_debug(env
, 0);
1536 /* must flush all the translated code to avoid inconsistencies */
1537 /* XXX: only flush what is necessary */
1544 /* enable or disable low levels log */
1545 void cpu_set_log(int log_flags
)
1547 loglevel
= log_flags
;
1548 if (loglevel
&& !logfile
) {
1549 logfile
= fopen(logfilename
, log_append
? "a" : "w");
1551 perror(logfilename
);
1554 #if !defined(CONFIG_SOFTMMU)
1555 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1557 static char logfile_buf
[4096];
1558 setvbuf(logfile
, logfile_buf
, _IOLBF
, sizeof(logfile_buf
));
1560 #elif !defined(_WIN32)
1561 /* Win32 doesn't support line-buffering and requires size >= 2 */
1562 setvbuf(logfile
, NULL
, _IOLBF
, 0);
1566 if (!loglevel
&& logfile
) {
1572 void cpu_set_log_filename(const char *filename
)
1574 logfilename
= strdup(filename
);
1579 cpu_set_log(loglevel
);
1582 static void cpu_unlink_tb(CPUState
*env
)
1584 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1585 problem and hope the cpu will stop of its own accord. For userspace
1586 emulation this often isn't actually as bad as it sounds. Often
1587 signals are used primarily to interrupt blocking syscalls. */
1588 TranslationBlock
*tb
;
1589 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1591 spin_lock(&interrupt_lock
);
1592 tb
= env
->current_tb
;
1593 /* if the cpu is currently executing code, we must unlink it and
1594 all the potentially executing TB */
1596 env
->current_tb
= NULL
;
1597 tb_reset_jump_recursive(tb
);
1599 spin_unlock(&interrupt_lock
);
1602 /* mask must never be zero, except for A20 change call */
1603 void cpu_interrupt(CPUState
*env
, int mask
)
1607 old_mask
= env
->interrupt_request
;
1608 env
->interrupt_request
|= mask
;
1610 #ifndef CONFIG_USER_ONLY
1612 * If called from iothread context, wake the target cpu in
1615 if (!qemu_cpu_self(env
)) {
1622 env
->icount_decr
.u16
.high
= 0xffff;
1623 #ifndef CONFIG_USER_ONLY
1625 && (mask
& ~old_mask
) != 0) {
1626 cpu_abort(env
, "Raised interrupt while not in I/O function");
1634 void cpu_reset_interrupt(CPUState
*env
, int mask
)
1636 env
->interrupt_request
&= ~mask
;
1639 void cpu_exit(CPUState
*env
)
1641 env
->exit_request
= 1;
1645 const CPULogItem cpu_log_items
[] = {
1646 { CPU_LOG_TB_OUT_ASM
, "out_asm",
1647 "show generated host assembly code for each compiled TB" },
1648 { CPU_LOG_TB_IN_ASM
, "in_asm",
1649 "show target assembly code for each compiled TB" },
1650 { CPU_LOG_TB_OP
, "op",
1651 "show micro ops for each compiled TB" },
1652 { CPU_LOG_TB_OP_OPT
, "op_opt",
1655 "before eflags optimization and "
1657 "after liveness analysis" },
1658 { CPU_LOG_INT
, "int",
1659 "show interrupts/exceptions in short format" },
1660 { CPU_LOG_EXEC
, "exec",
1661 "show trace before each executed TB (lots of logs)" },
1662 { CPU_LOG_TB_CPU
, "cpu",
1663 "show CPU state before block translation" },
1665 { CPU_LOG_PCALL
, "pcall",
1666 "show protected mode far calls/returns/exceptions" },
1667 { CPU_LOG_RESET
, "cpu_reset",
1668 "show CPU state before CPU resets" },
1671 { CPU_LOG_IOPORT
, "ioport",
1672 "show all i/o ports accesses" },
1677 #ifndef CONFIG_USER_ONLY
1678 static QLIST_HEAD(memory_client_list
, CPUPhysMemoryClient
) memory_client_list
1679 = QLIST_HEAD_INITIALIZER(memory_client_list
);
1681 static void cpu_notify_set_memory(target_phys_addr_t start_addr
,
1683 ram_addr_t phys_offset
)
1685 CPUPhysMemoryClient
*client
;
1686 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1687 client
->set_memory(client
, start_addr
, size
, phys_offset
);
1691 static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start
,
1692 target_phys_addr_t end
)
1694 CPUPhysMemoryClient
*client
;
1695 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1696 int r
= client
->sync_dirty_bitmap(client
, start
, end
);
1703 static int cpu_notify_migration_log(int enable
)
1705 CPUPhysMemoryClient
*client
;
1706 QLIST_FOREACH(client
, &memory_client_list
, list
) {
1707 int r
= client
->migration_log(client
, enable
);
1714 static void phys_page_for_each_1(CPUPhysMemoryClient
*client
,
1715 int level
, void **lp
)
1723 PhysPageDesc
*pd
= *lp
;
1724 for (i
= 0; i
< L2_BITS
; ++i
) {
1725 if (pd
[i
].phys_offset
!= IO_MEM_UNASSIGNED
) {
1726 client
->set_memory(client
, pd
[i
].region_offset
,
1727 TARGET_PAGE_SIZE
, pd
[i
].phys_offset
);
1732 for (i
= 0; i
< L2_BITS
; ++i
) {
1733 phys_page_for_each_1(client
, level
- 1, pp
+ i
);
1738 static void phys_page_for_each(CPUPhysMemoryClient
*client
)
1741 for (i
= 0; i
< P_L1_SIZE
; ++i
) {
1742 phys_page_for_each_1(client
, P_L1_SHIFT
/ L2_BITS
- 1,
1747 void cpu_register_phys_memory_client(CPUPhysMemoryClient
*client
)
1749 QLIST_INSERT_HEAD(&memory_client_list
, client
, list
);
1750 phys_page_for_each(client
);
1753 void cpu_unregister_phys_memory_client(CPUPhysMemoryClient
*client
)
1755 QLIST_REMOVE(client
, list
);
1759 static int cmp1(const char *s1
, int n
, const char *s2
)
1761 if (strlen(s2
) != n
)
1763 return memcmp(s1
, s2
, n
) == 0;
1766 /* takes a comma separated list of log masks. Return 0 if error. */
1767 int cpu_str_to_log_mask(const char *str
)
1769 const CPULogItem
*item
;
1776 p1
= strchr(p
, ',');
1779 if(cmp1(p
,p1
-p
,"all")) {
1780 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1784 for(item
= cpu_log_items
; item
->mask
!= 0; item
++) {
1785 if (cmp1(p
, p1
- p
, item
->name
))
1799 void cpu_abort(CPUState
*env
, const char *fmt
, ...)
1806 fprintf(stderr
, "qemu: fatal: ");
1807 vfprintf(stderr
, fmt
, ap
);
1808 fprintf(stderr
, "\n");
1810 cpu_dump_state(env
, stderr
, fprintf
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1812 cpu_dump_state(env
, stderr
, fprintf
, 0);
1814 if (qemu_log_enabled()) {
1815 qemu_log("qemu: fatal: ");
1816 qemu_log_vprintf(fmt
, ap2
);
1819 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
1821 log_cpu_state(env
, 0);
1828 #if defined(CONFIG_USER_ONLY)
1830 struct sigaction act
;
1831 sigfillset(&act
.sa_mask
);
1832 act
.sa_handler
= SIG_DFL
;
1833 sigaction(SIGABRT
, &act
, NULL
);
1839 CPUState
*cpu_copy(CPUState
*env
)
1841 CPUState
*new_env
= cpu_init(env
->cpu_model_str
);
1842 CPUState
*next_cpu
= new_env
->next_cpu
;
1843 int cpu_index
= new_env
->cpu_index
;
1844 #if defined(TARGET_HAS_ICE)
1849 memcpy(new_env
, env
, sizeof(CPUState
));
1851 /* Preserve chaining and index. */
1852 new_env
->next_cpu
= next_cpu
;
1853 new_env
->cpu_index
= cpu_index
;
1855 /* Clone all break/watchpoints.
1856 Note: Once we support ptrace with hw-debug register access, make sure
1857 BP_CPU break/watchpoints are handled correctly on clone. */
1858 QTAILQ_INIT(&env
->breakpoints
);
1859 QTAILQ_INIT(&env
->watchpoints
);
1860 #if defined(TARGET_HAS_ICE)
1861 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1862 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1864 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1865 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1873 #if !defined(CONFIG_USER_ONLY)
1875 static inline void tlb_flush_jmp_cache(CPUState
*env
, target_ulong addr
)
1879 /* Discard jump cache entries for any tb which might potentially
1880 overlap the flushed page. */
1881 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1882 memset (&env
->tb_jmp_cache
[i
], 0,
1883 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1885 i
= tb_jmp_cache_hash_page(addr
);
1886 memset (&env
->tb_jmp_cache
[i
], 0,
1887 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1890 static CPUTLBEntry s_cputlb_empty_entry
= {
1897 /* NOTE: if flush_global is true, also flush global entries (not
1899 void tlb_flush(CPUState
*env
, int flush_global
)
1903 #if defined(DEBUG_TLB)
1904 printf("tlb_flush:\n");
1906 /* must reset current TB so that interrupts cannot modify the
1907 links while we are modifying them */
1908 env
->current_tb
= NULL
;
1910 for(i
= 0; i
< CPU_TLB_SIZE
; i
++) {
1912 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
1913 env
->tlb_table
[mmu_idx
][i
] = s_cputlb_empty_entry
;
1917 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
1922 static inline void tlb_flush_entry(CPUTLBEntry
*tlb_entry
, target_ulong addr
)
1924 if (addr
== (tlb_entry
->addr_read
&
1925 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1926 addr
== (tlb_entry
->addr_write
&
1927 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
)) ||
1928 addr
== (tlb_entry
->addr_code
&
1929 (TARGET_PAGE_MASK
| TLB_INVALID_MASK
))) {
1930 *tlb_entry
= s_cputlb_empty_entry
;
1934 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
1939 #if defined(DEBUG_TLB)
1940 printf("tlb_flush_page: " TARGET_FMT_lx
"\n", addr
);
1942 /* must reset current TB so that interrupts cannot modify the
1943 links while we are modifying them */
1944 env
->current_tb
= NULL
;
1946 addr
&= TARGET_PAGE_MASK
;
1947 i
= (addr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
1948 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
1949 tlb_flush_entry(&env
->tlb_table
[mmu_idx
][i
], addr
);
1951 tlb_flush_jmp_cache(env
, addr
);
1954 /* update the TLBs so that writes to code in the virtual page 'addr'
1956 static void tlb_protect_code(ram_addr_t ram_addr
)
1958 cpu_physical_memory_reset_dirty(ram_addr
,
1959 ram_addr
+ TARGET_PAGE_SIZE
,
1963 /* update the TLB so that writes in physical page 'phys_addr' are no longer
1964 tested for self modifying code */
1965 static void tlb_unprotect_code_phys(CPUState
*env
, ram_addr_t ram_addr
,
1968 phys_ram_dirty
[ram_addr
>> TARGET_PAGE_BITS
] |= CODE_DIRTY_FLAG
;
1971 static inline void tlb_reset_dirty_range(CPUTLBEntry
*tlb_entry
,
1972 unsigned long start
, unsigned long length
)
1975 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
1976 addr
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) + tlb_entry
->addend
;
1977 if ((addr
- start
) < length
) {
1978 tlb_entry
->addr_write
= (tlb_entry
->addr_write
& TARGET_PAGE_MASK
) | TLB_NOTDIRTY
;
1983 /* Note: start and end must be within the same ram block. */
1984 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1988 unsigned long length
, start1
;
1992 start
&= TARGET_PAGE_MASK
;
1993 end
= TARGET_PAGE_ALIGN(end
);
1995 length
= end
- start
;
1998 len
= length
>> TARGET_PAGE_BITS
;
1999 mask
= ~dirty_flags
;
2000 p
= phys_ram_dirty
+ (start
>> TARGET_PAGE_BITS
);
2001 for(i
= 0; i
< len
; i
++)
2004 /* we modify the TLB cache so that the dirty bit will be set again
2005 when accessing the range */
2006 start1
= (unsigned long)qemu_get_ram_ptr(start
);
2007 /* Chek that we don't span multiple blocks - this breaks the
2008 address comparisons below. */
2009 if ((unsigned long)qemu_get_ram_ptr(end
- 1) - start1
2010 != (end
- 1) - start
) {
2014 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
2016 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2017 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2018 tlb_reset_dirty_range(&env
->tlb_table
[mmu_idx
][i
],
2024 int cpu_physical_memory_set_dirty_tracking(int enable
)
2027 in_migration
= enable
;
2028 ret
= cpu_notify_migration_log(!!enable
);
2032 int cpu_physical_memory_get_dirty_tracking(void)
2034 return in_migration
;
2037 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2038 target_phys_addr_t end_addr
)
2042 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
2046 static inline void tlb_update_dirty(CPUTLBEntry
*tlb_entry
)
2048 ram_addr_t ram_addr
;
2051 if ((tlb_entry
->addr_write
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
) {
2052 p
= (void *)(unsigned long)((tlb_entry
->addr_write
& TARGET_PAGE_MASK
)
2053 + tlb_entry
->addend
);
2054 ram_addr
= qemu_ram_addr_from_host(p
);
2055 if (!cpu_physical_memory_is_dirty(ram_addr
)) {
2056 tlb_entry
->addr_write
|= TLB_NOTDIRTY
;
2061 /* update the TLB according to the current state of the dirty bits */
2062 void cpu_tlb_update_dirty(CPUState
*env
)
2066 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++) {
2067 for(i
= 0; i
< CPU_TLB_SIZE
; i
++)
2068 tlb_update_dirty(&env
->tlb_table
[mmu_idx
][i
]);
2072 static inline void tlb_set_dirty1(CPUTLBEntry
*tlb_entry
, target_ulong vaddr
)
2074 if (tlb_entry
->addr_write
== (vaddr
| TLB_NOTDIRTY
))
2075 tlb_entry
->addr_write
= vaddr
;
2078 /* update the TLB corresponding to virtual page vaddr
2079 so that it is no longer dirty */
2080 static inline void tlb_set_dirty(CPUState
*env
, target_ulong vaddr
)
2085 vaddr
&= TARGET_PAGE_MASK
;
2086 i
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2087 for (mmu_idx
= 0; mmu_idx
< NB_MMU_MODES
; mmu_idx
++)
2088 tlb_set_dirty1(&env
->tlb_table
[mmu_idx
][i
], vaddr
);
2091 /* add a new TLB entry. At most one entry for a given virtual address
2092 is permitted. Return 0 if OK or 2 if the page could not be mapped
2093 (can only happen in non SOFTMMU mode for I/O pages or pages
2094 conflicting with the host address space). */
2095 int tlb_set_page_exec(CPUState
*env
, target_ulong vaddr
,
2096 target_phys_addr_t paddr
, int prot
,
2097 int mmu_idx
, int is_softmmu
)
2102 target_ulong address
;
2103 target_ulong code_address
;
2104 target_phys_addr_t addend
;
2108 target_phys_addr_t iotlb
;
2110 p
= phys_page_find(paddr
>> TARGET_PAGE_BITS
);
2112 pd
= IO_MEM_UNASSIGNED
;
2114 pd
= p
->phys_offset
;
2116 #if defined(DEBUG_TLB)
2117 printf("tlb_set_page: vaddr=" TARGET_FMT_lx
" paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2118 vaddr
, (int)paddr
, prot
, mmu_idx
, is_softmmu
, pd
);
2123 if ((pd
& ~TARGET_PAGE_MASK
) > IO_MEM_ROM
&& !(pd
& IO_MEM_ROMD
)) {
2124 /* IO memory case (romd handled later) */
2125 address
|= TLB_MMIO
;
2127 addend
= (unsigned long)qemu_get_ram_ptr(pd
& TARGET_PAGE_MASK
);
2128 if ((pd
& ~TARGET_PAGE_MASK
) <= IO_MEM_ROM
) {
2130 iotlb
= pd
& TARGET_PAGE_MASK
;
2131 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
)
2132 iotlb
|= IO_MEM_NOTDIRTY
;
2134 iotlb
|= IO_MEM_ROM
;
2136 /* IO handlers are currently passed a physical address.
2137 It would be nice to pass an offset from the base address
2138 of that region. This would avoid having to special case RAM,
2139 and avoid full address decoding in every device.
2140 We can't use the high bits of pd for this because
2141 IO_MEM_ROMD uses these as a ram address. */
2142 iotlb
= (pd
& ~TARGET_PAGE_MASK
);
2144 iotlb
+= p
->region_offset
;
2150 code_address
= address
;
2151 /* Make accesses to pages with watchpoints go via the
2152 watchpoint trap routines. */
2153 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2154 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
2155 iotlb
= io_mem_watch
+ paddr
;
2156 /* TODO: The memory case can be optimized by not trapping
2157 reads of pages with a write breakpoint. */
2158 address
|= TLB_MMIO
;
2162 index
= (vaddr
>> TARGET_PAGE_BITS
) & (CPU_TLB_SIZE
- 1);
2163 env
->iotlb
[mmu_idx
][index
] = iotlb
- vaddr
;
2164 te
= &env
->tlb_table
[mmu_idx
][index
];
2165 te
->addend
= addend
- vaddr
;
2166 if (prot
& PAGE_READ
) {
2167 te
->addr_read
= address
;
2172 if (prot
& PAGE_EXEC
) {
2173 te
->addr_code
= code_address
;
2177 if (prot
& PAGE_WRITE
) {
2178 if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_ROM
||
2179 (pd
& IO_MEM_ROMD
)) {
2180 /* Write access calls the I/O callback. */
2181 te
->addr_write
= address
| TLB_MMIO
;
2182 } else if ((pd
& ~TARGET_PAGE_MASK
) == IO_MEM_RAM
&&
2183 !cpu_physical_memory_is_dirty(pd
)) {
2184 te
->addr_write
= address
| TLB_NOTDIRTY
;
2186 te
->addr_write
= address
;
2189 te
->addr_write
= -1;
2196 void tlb_flush(CPUState
*env
, int flush_global
)
2200 void tlb_flush_page(CPUState
*env
, target_ulong addr
)
2205 * Walks guest process memory "regions" one by one
2206 * and calls callback function 'fn' for each region.
2209 struct walk_memory_regions_data
2211 walk_memory_regions_fn fn
;
2213 unsigned long start
;
2217 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
2218 unsigned long end
, int new_prot
)
2220 if (data
->start
!= -1ul) {
2221 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
2227 data
->start
= (new_prot
? end
: -1ul);
2228 data
->prot
= new_prot
;
2233 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
2234 unsigned long base
, int level
, void **lp
)
2240 return walk_memory_regions_end(data
, base
, 0);
2245 for (i
= 0; i
< L2_BITS
; ++i
) {
2246 int prot
= pd
[i
].flags
;
2248 pa
= base
| (i
<< TARGET_PAGE_BITS
);
2249 if (prot
!= data
->prot
) {
2250 rc
= walk_memory_regions_end(data
, pa
, prot
);
2258 for (i
= 0; i
< L2_BITS
; ++i
) {
2259 pa
= base
| (i
<< (TARGET_PAGE_BITS
+ L2_BITS
* level
));
2260 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
2270 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
2272 struct walk_memory_regions_data data
;
2280 for (i
= 0; i
< V_L1_SIZE
; i
++) {
2281 int rc
= walk_memory_regions_1(&data
, i
<< V_L1_SHIFT
,
2282 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
2288 return walk_memory_regions_end(&data
, 0, 0);
2291 static int dump_region(void *priv
, unsigned long start
,
2292 unsigned long end
, unsigned long prot
)
2294 FILE *f
= (FILE *)priv
;
2296 (void) fprintf(f
, "%08lx-%08lx %08lx %c%c%c\n",
2297 start
, end
, end
- start
,
2298 ((prot
& PAGE_READ
) ? 'r' : '-'),
2299 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2300 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2305 /* dump memory mappings */
2306 void page_dump(FILE *f
)
2308 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2309 "start", "end", "size", "prot");
2310 walk_memory_regions(f
, dump_region
);
2313 int page_get_flags(target_ulong address
)
2317 p
= page_find(address
>> TARGET_PAGE_BITS
);
2323 /* Modify the flags of a page and invalidate the code if necessary.
2324 The flag PAGE_WRITE_ORG is positioned automatically depending
2325 on PAGE_WRITE. The mmap_lock should already be held. */
2326 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2328 target_ulong addr
, len
;
2330 /* This function should never be called with addresses outside the
2331 guest address space. If this assert fires, it probably indicates
2332 a missing call to h2g_valid. */
2333 #if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
2334 assert(end
< (1ul << L1_MAP_ADDR_SPACE_BITS
));
2336 assert(start
< end
);
2338 start
= start
& TARGET_PAGE_MASK
;
2339 end
= TARGET_PAGE_ALIGN(end
);
2341 if (flags
& PAGE_WRITE
) {
2342 flags
|= PAGE_WRITE_ORG
;
2345 for (addr
= start
, len
= end
- start
;
2347 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2348 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2350 /* If the write protection bit is set, then we invalidate
2352 if (!(p
->flags
& PAGE_WRITE
) &&
2353 (flags
& PAGE_WRITE
) &&
2355 tb_invalidate_phys_page(addr
, 0, NULL
);
2361 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2367 /* This function should never be called with addresses outside the
2368 guest address space. If this assert fires, it probably indicates
2369 a missing call to h2g_valid. */
2370 #if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
2371 assert(start
< (1ul << L1_MAP_ADDR_SPACE_BITS
));
2374 if (start
+ len
- 1 < start
) {
2375 /* We've wrapped around. */
2379 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2380 start
= start
& TARGET_PAGE_MASK
;
2382 for (addr
= start
, len
= end
- start
;
2384 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2385 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2388 if( !(p
->flags
& PAGE_VALID
) )
2391 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2393 if (flags
& PAGE_WRITE
) {
2394 if (!(p
->flags
& PAGE_WRITE_ORG
))
2396 /* unprotect the page if it was put read-only because it
2397 contains translated code */
2398 if (!(p
->flags
& PAGE_WRITE
)) {
2399 if (!page_unprotect(addr
, 0, NULL
))
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
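
/* Illustrative sketch (not part of the original file): the typical caller is
   a synchronous SEGV handler in the user-mode emulator.  The helper name is
   hypothetical; the contract shown is "returns TRUE if the write fault was
   caused by our own write-protection of a page holding translated code". */
#if 0
static int handle_write_fault(unsigned long host_addr, unsigned long pc, void *puc)
{
    /* h2g() converts the faulting host address back to a guest address. */
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        return 1;   /* protection restored, TBs invalidated: retry the insn */
    }
    return 0;       /* a real guest fault: deliver a signal to the guest */
}
#endif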
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)

typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);

#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
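
/* Illustrative sketch (not part of the original file): how board code
   typically drives this.  cpu_register_physical_memory() is the common
   wrapper that passes region_offset == 0.  The addresses, sizes and the
   my_mmio_* callbacks are hypothetical. */
#if 0
{
    /* 32 MB of guest RAM at physical address 0 */
    ram_addr_t ram_offset = qemu_ram_alloc(32 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 32 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* one page of MMIO at 0x10000000, handled by device callbacks */
    int mmio_index = cpu_register_io_memory(my_mmio_read, my_mmio_write, opaque);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}
#endif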
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
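
/* Illustrative sketch (not part of the original file): per the comment above,
   qemu_get_ram_ptr() is meant for memory the device itself owns (e.g. video
   RAM), not for guest-driven DMA.  The sizes and names are hypothetical. */
#if 0
{
    ram_addr_t vram_offset = qemu_ram_alloc(0x100000);   /* 1 MB of video RAM */
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, 0x100000);   /* the device may touch its own block freely */
    /* For guest-visible DMA, use cpu_physical_memory_rw()/..._map() instead. */
}
#endif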
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
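
/* Illustrative sketch (not part of the original file): phys_ram_dirty keeps
   one byte per target page, and the notdirty handlers above set every bit
   except CODE_DIRTY_FLAG so that pages containing translated code keep
   trapping writes until the TBs are flushed.  A minimal query of a page's
   state, mirroring the 0xff fast-path check used above, might look like: */
#if 0
static int page_fully_dirty(ram_addr_t ram_addr)
{
    /* 0xff means "dirty for all clients, including code": no trap needed. */
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif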
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB.  Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
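
/* Illustrative sketch (not part of the original file): watchpoints are armed
   through the CPU watchpoint API (cpu_watchpoint_insert()); the TLB then
   routes accesses to the watched page through the watch_mem_* handlers above,
   which call check_watchpoint() before performing the real access.  The
   address and length below are hypothetical. */
#if 0
{
    CPUWatchpoint *wp;

    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
    /* A guest store to [0x1000, 0x1004) now raises EXCP_DEBUG (or re-executes
       a single-instruction TB) via check_watchpoint(). */
}
#endif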
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
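
/* Illustrative sketch (not part of the original file): a device registers
   its byte/word/long callbacks with cpu_register_io_memory() above, gets
   back an io_index, and maps it with cpu_register_physical_memory().  All
   of the my_dev_* names and the address are hypothetical.  Leaving the byte
   and word slots NULL yields an IO_MEM_SUBWIDTH region, as handled above. */
#if 0
static uint32_t my_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                               /* register value at 'addr' */
}

static void my_dev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* update device state */
}

static CPUReadMemoryFunc * const my_dev_read[3] = { NULL, NULL, my_dev_readl };
static CPUWriteMemoryFunc * const my_dev_write[3] = { NULL, NULL, my_dev_writel };

static void my_dev_map(void *opaque)
{
    int io = cpu_register_io_memory(my_dev_read, my_dev_write, opaque);
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, io);
}
#endif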
static void io_mem_init(void)
{
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
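
/* Illustrative sketch (not part of the original file): the usual DMA pattern
   is to map (possibly getting back less than requested), operate on the host
   pointer, then unmap with the length actually touched.  The addresses and
   lengths below are hypothetical. */
#if 0
{
    target_phys_addr_t dma_addr = 0x80000;
    target_phys_addr_t mapped   = 4096;

    uint8_t *host = cpu_physical_memory_map(dma_addr, &mapped, 1 /* is_write */);
    if (host) {
        memset(host, 0, mapped);                      /* device fills the buffer */
        cpu_physical_memory_unmap(host, mapped, 1, mapped);
    } else {
        /* resources exhausted: cpu_register_map_client() and retry later */
    }
}
#endif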
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
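
/* Illustrative sketch (not part of the original file): stl_phys_notdirty()
   is intended for MMU helpers that update accessed/dirty bits in guest page
   tables, so the store itself does not mark the page dirty or invalidate
   translated code; a normal device store would use stl_phys().  The PTE
   address and bit below are hypothetical. */
#if 0
{
    target_phys_addr_t pte_addr = 0x1000;
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);   /* set an "accessed"-style bit */
}
#endif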
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif