/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"

#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
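
/* A worked example of the sizing above (assuming L2_BITS == 10 as restored
   in the define): for a 32-bit user-mode guest with 4 KiB pages,
   L1_MAP_ADDR_SPACE_BITS = 32 and TARGET_PAGE_BITS = 12, so
   V_L1_BITS_REM = (32 - 12) % 10 = 0, which is below the minimum of 4.
   That gives V_L1_BITS = 10 and V_L1_SHIFT = 32 - 12 - 10 = 10: a page
   index splits into a 10-bit L1 slot plus one 10-bit bottom level,
   exactly the 20 bits of page number in a 32-bit address space, and the
   intermediate-level loop in page_find_alloc() runs zero times. */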
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
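
/* Note: code_gen_buffer_max_size is the buffer size minus the worst-case
   size of one translated block, so the overflow check in tb_alloc() can
   compare code_gen_ptr against it before translating and still be
   guaranteed that the next block fits in the buffer. */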
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
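
/* The lists above rely on a tagged-pointer convention: the low two bits of
   each TranslationBlock pointer stored in page_next[]/jmp_next[] encode
   which slot (0 or 1) of the pointed-to TB the link belongs to, and the
   value 2 marks the list head stored in jmp_first.  Masking with ~3
   recovers the real pointer, as done in the loops above. */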
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
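
/* Example: set_bits(tab, 3, 7) sets bits 3..9.  start and end land in
   different bytes, so the first byte gets mask 0xf8 (bits 3-7), no full
   middle bytes are written, and the second byte gets mask 0x03 (bits 0-1,
   i.e. bits 8-9 of the range). */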
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
*tb_gen_code(CPUState
*env
,
912 target_ulong pc
, target_ulong cs_base
,
913 int flags
, int cflags
)
915 TranslationBlock
*tb
;
917 tb_page_addr_t phys_pc
, phys_page2
;
918 target_ulong virt_page2
;
921 phys_pc
= get_page_addr_code(env
, pc
);
924 /* flush must be done */
926 /* cannot fail at this point */
928 /* Don't forget to invalidate previous TB info. */
929 tb_invalidated_flag
= 1;
931 tc_ptr
= code_gen_ptr
;
933 tb
->cs_base
= cs_base
;
936 cpu_gen_code(env
, tb
, &code_gen_size
);
937 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
939 /* check next page if needed */
940 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
942 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
943 phys_page2
= get_page_addr_code(env
, virt_page2
);
945 tb_link_page(tb
, phys_pc
, phys_page2
);
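
/* A translated block may straddle a guest page boundary; in that case
   phys_page2 is the physical address of the second page and tb_link_page()
   links the TB into both pages' TB lists, so that invalidating either page
   invalidates the block. */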
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
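
/* p->code_bitmap holds one bit per byte of the guest page, set for every
   byte covered by a translated block.  A small write whose corresponding
   bits are all clear can therefore return without walking the TB list at
   all, which is the common case for data writes into pages that also
   contain code. */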
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
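
/* The binary search is valid because tbs[] entries are allocated in the
   same order as their code is emitted into the linear code_gen_buffer, so
   tc_ptr values are strictly increasing until the next tb_flush(). */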
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
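
/* In cpu_interrupt() above, setting the high half of icount_decr makes
   the 32-bit instruction counter negative, so the next counter check in
   generated code exits the TB; this is how an interrupt request breaks
   out of icount-based execution without unlinking TBs. */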
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
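
/* Two ranges are cleared because a TB can span two pages: a block whose
   pc lies on the preceding page may still overlap the flushed page, and
   its jump cache slot is hashed from that earlier pc. */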
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
)
2040 in_migration
= enable
;
2041 ret
= cpu_notify_migration_log(!!enable
);
2045 int cpu_physical_memory_get_dirty_tracking(void)
2047 return in_migration
;
2050 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr
,
2051 target_phys_addr_t end_addr
)
2055 ret
= cpu_notify_sync_dirty_bitmap(start_addr
, end_addr
);
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
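
/* Example: with an existing 2 MB region at 0x00200000 (mask 0xffe00000),
   adding another 2 MB page at 0x00400000 shifts the mask left twice until
   the XOR of the two addresses has no bits left under it, ending with
   mask 0xff800000: the remembered region grows to the 8 MB block
   0x00000000-0x007fffff that covers both pages. */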
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
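
/* env->iotlb stores (iotlb - vaddr): on a slow-path access the softmmu
   helpers add the faulting virtual address back, recovering either the
   ram address (plus the NOTDIRTY/ROM bits) or the I/O handler index plus
   the offset within the region in a single addition. */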
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}
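/* Sketch (hypothetical syscall helper): verifying that a guest buffer is
 * readable before copying from it.  page_check_range() returns 0 on
 * success and -1 on failure:
 *
 *     if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
 *         return -TARGET_EFAULT;
 *     }
 */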
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for (addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);

            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
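/* Usage sketch (hypothetical board code; addresses and sizes are
 * illustrative).  cpu_register_physical_memory() is the common wrapper
 * that passes region_offset == 0:
 *
 *     cpu_register_physical_memory(0x00000000, ram_size,
 *                                  ram_offset | IO_MEM_RAM);
 *     cpu_register_physical_memory(0x10000000, 0x1000, io_index);
 *
 * A range that does not start or end on a page boundary is routed
 * through a subpage_t for the partial pages, as implemented above.
 */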
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    return area;
}
#endif
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        new_block->host = mmap((void*)0x1000000, size,
                               PROT_EXEC|PROT_READ|PROT_WRITE,
                               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
                                  (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
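/* Sketch (hypothetical device model): the returned ram_addr_t is an
 * offset into the guest RAM space, not a host pointer; it is the value
 * that cpu_register_physical_memory() and qemu_get_ram_ptr() expect:
 *
 *     ram_addr_t vga_ram = qemu_ram_alloc(vga_ram_size);
 *     cpu_register_physical_memory(vga_base, vga_ram_size,
 *                                  vga_ram | IO_MEM_RAM);
 */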
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    block = ram_blocks;
    prev = NULL;
    prevp = &ram_blocks;

    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
            prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}
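/* Sketch (hypothetical video device, matching the comment above: memory
 * the device owns and will not access beyond the end of the block):
 *
 *     uint8_t *vram_ptr = qemu_get_ram_ptr(vga_ram);
 *     memset(vram_ptr, 0, vga_ram_size);
 */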
/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                     region_offset);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
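/* Usage sketch (hypothetical device; all my_dev_* names are
 * illustrative).  The three entries cover byte/word/dword accesses; a
 * NULL entry downgrades the region to IO_MEM_SUBWIDTH:
 *
 *     static CPUReadMemoryFunc * const my_dev_read[3] = {
 *         my_dev_readb, my_dev_readw, my_dev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const my_dev_write[3] = {
 *         my_dev_writeb, my_dev_writew, my_dev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, io);
 */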
void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
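/* Sketch of the map/unmap discipline for a DMA-capable device
 * (hypothetical caller; my_retry_cb is an illustrative name).  When the
 * map fails because the bounce buffer is busy, register a client to be
 * notified when retrying is likely to succeed:
 *
 *     target_phys_addr_t plen = len;
 *     void *buf = cpu_physical_memory_map(addr, &plen, is_write);
 *     if (!buf) {
 *         cpu_register_map_client(s, my_retry_cb);
 *         return;
 *     }
 *     ... transfer at most plen bytes through buf ...
 *     cpu_physical_memory_unmap(buf, plen, is_write, plen);
 */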
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
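/* Sketch (hypothetical target MMU helper): setting an accessed bit in a
 * guest PTE without flagging the RAM page dirty, so that dirty tracking
 * can be reserved for the PTEs themselves.  PG_ACCESSED_MASK stands in
 * for a target-specific bit:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 *     stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
 */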
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif