/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>

#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
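/* Illustrative sketch (not part of the original file): how a page index is
   decomposed with the macros above.  One V_L1_BITS-wide top-level slot is
   followed by one L2_BITS-wide index per remaining level, exactly as
   page_find_alloc() walks them below: */
#if 0
static void example_map_indices(tb_page_addr_t index)
{
    size_t l1 = (index >> V_L1_SHIFT) & (V_L1_SIZE - 1);  /* l1_map slot */
    int i;

    for (i = V_L1_SHIFT / L2_BITS - 1; i >= 0; i--) {
        size_t ln = (index >> (i * L2_BITS)) & (L2_SIZE - 1);
        (void)ln;  /* index into the level-(i+1) table */
    }
    (void)l1;
}
#endif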
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;

struct PhysPageEntry {
    union {
        uint16_t leaf;   /* index into phys_sections */
        PhysPageEntry *node;
    } u;
};

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static PhysPageEntry phys_map;
static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif
#ifdef _WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
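/* Usage sketch (illustrative, not part of the original file): page_find()
   is the read-only probe, page_find_alloc(..., 1) creates missing levels
   on demand.  E.g., in user mode, where PageDesc carries a flags field: */
#if 0
static int example_probe_flags(target_ulong addr)
{
    PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
    return p ? p->flags : 0;  /* NULL means the page was never touched */
}
#endif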
#if !defined(CONFIG_USER_ONLY)
static uint16_t *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageEntry *lp, *p;
    int i, j;

    lp = &phys_map;

    /* Level 1..N.  */
    for (i = P_L2_LEVELS - 1; i >= 0; i--) {
        if (lp->u.node == NULL) {
            if (!alloc) {
                return NULL;
            }
            lp->u.node = p = g_malloc0(sizeof(PhysPageEntry) * L2_SIZE);
            if (i == 0) {
                for (j = 0; j < L2_SIZE; j++) {
                    p[j].u.leaf = phys_section_unassigned;
                }
            }
        }
        lp = &lp->u.node[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    return &lp->u.leaf;
}
static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    uint16_t *p = phys_page_find_alloc(index, 0);
    uint16_t s_index = phys_section_unassigned;
    MemoryRegionSection *section;
    PhysPageDesc pd;

    if (p) {
        s_index = *p;
    }
    section = &phys_sections[s_index];
    index <<= TARGET_PAGE_BITS;
    assert(section->offset_within_address_space <= index
           && index <= section->offset_within_address_space + section->size-1);
    pd.phys_offset = section->mr->ram_addr;
    pd.region_offset = (index - section->offset_within_address_space)
        + section->offset_within_region;
    if (memory_region_is_ram(section->mr)) {
        pd.phys_offset += pd.region_offset;
        pd.region_offset = 0;
    } else if (section->mr->rom_device) {
        pd.phys_offset += pd.region_offset;
    }
    if (section->readonly) {
        pd.phys_offset |= io_mem_rom.ram_addr;
    }
    return pd;
}
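/* Worked example (illustrative, not part of the original): for an MMIO
   section mapped at address-space offset 0x10000 with size 0x4000 and
   offset_within_region 0, a lookup of paddr 0x12000 yields
   region_offset = (0x12000 - 0x10000) + 0 = 0x2000, the offset the device
   callback will see; for RAM the same quantity is instead folded into
   phys_offset so a host pointer can be derived directly. */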
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
*env
)
656 #if defined(CONFIG_USER_ONLY)
659 env
->next_cpu
= NULL
;
662 while (*penv
!= NULL
) {
663 penv
= &(*penv
)->next_cpu
;
666 env
->cpu_index
= cpu_index
;
668 QTAILQ_INIT(&env
->breakpoints
);
669 QTAILQ_INIT(&env
->watchpoints
);
670 #ifndef CONFIG_USER_ONLY
671 env
->thread_id
= qemu_get_thread_id();
674 #if defined(CONFIG_USER_ONLY)
677 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
678 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
679 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
680 cpu_save
, cpu_load
, env
);
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
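/* Worked example (illustrative, not part of the original): set_bits(tab, 3, 7)
   marks bits 3..9.  The head mask 0xff << 3 == 0xf8 sets bits 3..7 of
   tab[0]; start then rounds up to 8 and the tail mask
   ~(0xff << (10 & 7)) == 0x03 sets bits 0..1 of tab[1]. */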
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
*tb_gen_code(CPUState
*env
,
997 target_ulong pc
, target_ulong cs_base
,
998 int flags
, int cflags
)
1000 TranslationBlock
*tb
;
1002 tb_page_addr_t phys_pc
, phys_page2
;
1003 target_ulong virt_page2
;
1006 phys_pc
= get_page_addr_code(env
, pc
);
1009 /* flush must be done */
1011 /* cannot fail at this point */
1013 /* Don't forget to invalidate previous TB info. */
1014 tb_invalidated_flag
= 1;
1016 tc_ptr
= code_gen_ptr
;
1017 tb
->tc_ptr
= tc_ptr
;
1018 tb
->cs_base
= cs_base
;
1020 tb
->cflags
= cflags
;
1021 cpu_gen_code(env
, tb
, &code_gen_size
);
1022 code_gen_ptr
= (void *)(((unsigned long)code_gen_ptr
+ code_gen_size
+ CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1024 /* check next page if needed */
1025 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1027 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1028 phys_page2
= get_page_addr_code(env
, virt_page2
);
1030 tb_link_page(tb
, phys_pc
, phys_page2
);
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
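/* Why the binary search works (illustrative note, not in the original):
   TBs are carved out of code_gen_buffer sequentially, so tbs[0..nb_tbs-1]
   is already sorted by tc_ptr.  E.g. with blocks at tc_ptr 0x100, 0x180
   and 0x240, looking up host PC 0x1f0 narrows to m_max == 1 and returns
   &tbs[1], the block starting at 0x180 that contains the address. */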
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
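/* Usage sketch (illustrative, not part of the original file): a debugger
   front end such as the gdbstub would request a 4-byte write watchpoint
   like this: */
#if 0
static int example_set_watchpoint(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;
    /* BP_GDB keeps it in front of the list; BP_MEM_WRITE traps stores */
    return cpu_watchpoint_insert(env, guest_addr, 4,
                                 BP_MEM_WRITE | BP_GDB, &wp);
}
#endif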
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
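/* Usage sketch (illustrative, not part of the original file): this is the
   parser behind "-d" style log options; 0 flags an unknown item: */
#if 0
static void example_enable_logging(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);   /* e.g. "in_asm,op" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", opt);
        return;
    }
    cpu_set_log(mask);
}
#endif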
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
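/* Worked example (illustrative, not part of the original): with a 2MB page
   recorded at 0x00200000 (mask 0xffe00000), adding another at 0x00c00000
   widens the mask until both addresses agree: 0xffe00000 -> 0xffc00000 ->
   0xff800000 -> 0xff000000.  The remembered region becomes
   [0x00000000, 0x01000000) and any tlb_flush_page() inside it forces a
   full flush. */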
static bool is_ram_rom(ram_addr_t pd)
{
    pd &= ~TARGET_PAGE_MASK;
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
}

static bool is_romd(ram_addr_t pd)
{
    MemoryRegion *mr;

    pd &= ~TARGET_PAGE_MASK;
    mr = io_mem_region[pd];
    return mr->rom_device && mr->readable;
}

static bool is_ram_rom_romd(ram_addr_t pd)
{
    return is_ram_rom(pd) || is_romd(pd);
}
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if (!is_ram_rom_romd(pd)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if (is_ram_rom(pd)) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
            iotlb |= io_mem_notdirty.ram_addr;
        else
            iotlb |= io_mem_rom.ram_addr;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        iotlb += p.region_offset;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch.ram_addr + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
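/* Illustrative note (not part of the original file): once a page is
   entered this way, the fast path turns a guest virtual address into a
   host pointer with a single addition,
       host = (void *)(vaddr + te->addend);
   whereas the io_mem_* bits kept in addr_read/addr_write/addr_code force
   the slow path (MMIO, dirty tracking, watchpoints) when they are set. */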
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section,
                                uint16_t orig_section);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
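/* Worked example for CHECK_SUBPAGE, assuming TARGET_PAGE_SIZE == 0x1000:
   registering a region with start_addr = 0x2100 and orig_size = 0xe00
   while scanning page addr = 0x2000 gives
       start_addr2 = 0x2100 & ~TARGET_PAGE_MASK = 0x100           (> 0)
       end_addr2   = (0x2100 + 0xe00 - 1) & ~TARGET_PAGE_MASK = 0xeff
   so need_subpage = 1: only bytes 0x100..0xeff of that page are routed
   to the new section, and the rest of the page keeps its old mapping. */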
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p = lp->u.node;

    if (!p) {
        return;
    }

    for (i = 0; i < L2_SIZE; ++i) {
        if (level > 0) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].u.leaf);
        }
    }
    g_free(p);
    lp->u.node = NULL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr, end_addr;
    uint16_t *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
        if (p && *p != phys_section_unassigned) {
            uint16_t orig_memory = *p;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;
            MemoryRegion *mr = phys_sections[orig_memory].mr;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(mr->subpage)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           p, orig_memory);
                } else {
                    subpage = container_of(mr, subpage_t, iomem);
                }
                subpage_register(subpage, start_addr2, end_addr2,
                                 section_index);
            } else {
                *p = section_index;
            }
        } else {
            MemoryRegion *mr = section->mr;
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            *p = section_index;
            if (!(memory_region_is_ram(mr) || mr->rom_device)) {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           p, phys_section_unassigned);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     section_index);
                }
            }
        }
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
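/* Worked example for find_ram_offset(): with existing blocks at
   [0, 0x100000) and [0x300000, 0x400000), a request for size = 0x100000
   considers the gap [0x100000, 0x300000) (0x200000 bytes) and the
   unbounded space past 0x400000, and returns 0x100000: the end of the
   block whose following gap is the smallest one still large enough.
   Packing blocks this way keeps last_ram_offset(), and therefore the
   phys_dirty bitmap, as small as possible. */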
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2850 RAMBlock
*new_block
;
2852 size
= TARGET_PAGE_ALIGN(size
);
2853 new_block
= g_malloc0(sizeof(*new_block
));
2856 new_block
->offset
= find_ram_offset(size
);
2858 new_block
->host
= host
;
2859 new_block
->flags
|= RAM_PREALLOC_MASK
;
2862 #if defined (__linux__) && !defined(TARGET_S390X)
2863 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2864 if (!new_block
->host
) {
2865 new_block
->host
= qemu_vmalloc(size
);
2866 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2869 fprintf(stderr
, "-mem-path option unsupported\n");
2873 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2874 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2875 an system defined value, which is at least 256GB. Larger systems
2876 have larger values. We put the guest between the end of data
2877 segment (system break) and this value. We use 32GB as a base to
2878 have enough room for the system break to grow. */
2879 new_block
->host
= mmap((void*)0x800000000, size
,
2880 PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2881 MAP_SHARED
| MAP_ANONYMOUS
| MAP_FIXED
, -1, 0);
2882 if (new_block
->host
== MAP_FAILED
) {
2883 fprintf(stderr
, "Allocating RAM failed\n");
2887 if (xen_enabled()) {
2888 xen_ram_alloc(new_block
->offset
, size
, mr
);
2890 new_block
->host
= qemu_vmalloc(size
);
2893 qemu_madvise(new_block
->host
, size
, QEMU_MADV_MERGEABLE
);
2896 new_block
->length
= size
;
2898 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2900 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2901 last_ram_offset() >> TARGET_PAGE_BITS
);
2902 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2903 0xff, size
>> TARGET_PAGE_BITS
);
2906 kvm_setup_guest_memory(new_block
->host
, size
);
2908 return new_block
->offset
;
2911 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2913 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
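/* Illustrative sketch (not referenced elsewhere): allocate an 8 MB RAM
   block for a hypothetical video RAM region and obtain its host mapping.
   The region, name and size are made up for the example. */
static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);
    uint8_t *host = qemu_get_ram_ptr(offset);

    memset(host, 0, 8 * 1024 * 1024);   /* start with zeroed VRAM */
    return offset;
}
/* Note the returned offset is a ram_addr_t handle into ram_list, not a
   guest physical address; the region still has to be mapped into an
   address space before the guest can see it. */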
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();

        return NULL;
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
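/* Example round trip: any pointer obtained from qemu_get_ram_ptr() can be
   translated back to its ram_addr_t:

       void *host = qemu_get_ram_ptr(offset);
       ram_addr_t back;
       assert(qemu_ram_addr_from_host(host, &back) == 0 && back == offset);

   The _nofail variant is for contexts (e.g. TLB handling) where a miss
   indicates internal corruption and aborting is the only sane response. */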
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr->ram_addr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr->ram_addr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init (target_phys_addr_t base, uint16_t *section_ind,
                                uint16_t orig_section)
{
    subpage_t *mmio;
    MemoryRegionSection section = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    section.mr = &mmio->iomem;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *section_ind = phys_section_add(&section);
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_section);

    return mmio;
}
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}
/* Register a MemoryRegion for I/O dispatch.  If io_index is zero, a new
   io zone is allocated; if it is non-zero, the corresponding fixed io
   zone is modified.  The returned index can be used with
   cpu_register_physical_memory().  (-1) is returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}
static void io_mem_init(void)
{
    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
}

static void core_commit(MemoryListener *listener)
{
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    iorange_init(&section->mr->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&section->mr->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}
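/* Example: board code builds the guest address space on top of
   memory_map_init() roughly like this (a sketch; the names, address and
   size are illustrative, and memory_region_init_ram() lives in memory.c):

       static MemoryRegion ram;

       memory_region_init_ram(&ram, "example.ram", 128 * 1024 * 1024);
       memory_region_add_subregion(get_system_memory(), 0, &ram);

   The core_memory_listener then sees a region_add for the new section and
   enters it in the phys_map tree via cpu_register_physical_memory_log(). */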
MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
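/* Illustrative sketch of the map-client protocol: a device that fails to
   map a DMA buffer registers a callback and retries when the contended
   resource (the single bounce buffer) is released.  The function name and
   the retry logic it would contain are hypothetical. */
static void example_dma_retry(void *opaque)
{
    /* Called from cpu_notify_map_clients() once a mapping may succeed;
       re-issue the failed cpu_physical_memory_map() here.  Note the
       client is unregistered automatically after being notified. */
}
/* Registration:  handle = cpu_register_map_client(dev, example_dma_retry); */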
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
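/* Example DMA write using the map/unmap pair.  A minimal sketch: on a
   bounce-buffered (MMIO) mapping only up to TARGET_PAGE_SIZE bytes are
   mapped at once, so callers loop until the full transfer is done. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

        if (!host) {
            break; /* resources exhausted: register a map client and retry */
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        addr += plen;
        data += plen;
        size -= plen;
    }
}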
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
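/* Example: device code picks the accessor matching the guest-visible byte
   order of the structure it is reading, independent of the host:

       uint32_t le = ldl_le_phys(desc_addr);   // little-endian descriptor
       uint32_t be = ldl_be_phys(hdr_addr);    // network-byte-order field

   (desc_addr and hdr_addr are illustrative.)  ldl_phys() keeps the
   target's native order and is what the softmmu fast path uses. */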
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
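/* Example: target MMU emulation updating an accessed bit in a guest PTE
   uses the _notdirty variant so the write itself does not mark the page
   dirty (the dirty bitmap is being used to track PTE changes):

       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);

   pte_addr and PG_ACCESSED_MASK here are illustrative (x86-style). */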
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    ram_addr_t pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
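/* Example: the gdbstub reads guest virtual memory through this helper; a
   sketch of fetching one instruction's bytes at the current PC (the eip
   field is illustrative, x86-style; other targets use their own PC):

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, env->eip, insn, sizeof(insn), 0) < 0) {
           // page not mapped at this virtual address
       }
*/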
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif