2 * virtual page mapping and translated block handling
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 #include <sys/types.h>
27 #include "qemu-common.h"
35 #include "qemu-timer.h"
37 #include "exec-memory.h"
38 #if defined(CONFIG_USER_ONLY)
40 #if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41 #include <sys/param.h>
42 #if __FreeBSD_version >= 700104
43 #define HAVE_KINFO_GETVMMAP
44 #define sigqueue sigqueue_freebsd /* avoid redefinition */
47 #include <machine/profile.h>
55 #else /* !CONFIG_USER_ONLY */
56 #include "xen-mapcache.h"
62 #include "memory-internal.h"
64 //#define DEBUG_TB_INVALIDATE
66 //#define DEBUG_UNASSIGNED
68 /* make various TB consistency checks */
69 //#define DEBUG_TB_CHECK
71 //#define DEBUG_IOPORT
72 //#define DEBUG_SUBPAGE
74 #if !defined(CONFIG_USER_ONLY)
75 /* TB consistency checks only implemented for usermode emulation. */
79 #define SMC_BITMAP_USE_THRESHOLD 10
81 static TranslationBlock
*tbs
;
82 static int code_gen_max_blocks
;
83 TranslationBlock
*tb_phys_hash
[CODE_GEN_PHYS_HASH_SIZE
];
85 /* any access to the tbs or the page table must use this lock */
86 spinlock_t tb_lock
= SPIN_LOCK_UNLOCKED
;
88 #if defined(__arm__) || defined(__sparc__)
89 /* The prologue must be reachable with a direct jump. ARM and Sparc64
90 have limited branch ranges (possibly also PPC) so place it in a
91 section close to code segment. */
92 #define code_gen_section \
93 __attribute__((__section__(".gen_code"))) \
94 __attribute__((aligned (32)))
95 #elif defined(_WIN32) && !defined(_WIN64)
96 #define code_gen_section \
97 __attribute__((aligned (16)))
99 #define code_gen_section \
100 __attribute__((aligned (32)))
103 uint8_t code_gen_prologue
[1024] code_gen_section
;
104 static uint8_t *code_gen_buffer
;
105 static unsigned long code_gen_buffer_size
;
106 /* threshold to flush the translated code buffer */
107 static unsigned long code_gen_buffer_max_size
;
108 static uint8_t *code_gen_ptr
;
110 #if !defined(CONFIG_USER_ONLY)
112 static int in_migration
;
114 RAMList ram_list
= { .blocks
= QLIST_HEAD_INITIALIZER(ram_list
.blocks
) };
116 static MemoryRegion
*system_memory
;
117 static MemoryRegion
*system_io
;
119 MemoryRegion io_mem_ram
, io_mem_rom
, io_mem_unassigned
, io_mem_notdirty
;
120 static MemoryRegion io_mem_subpage_ram
;
124 CPUArchState
*first_cpu
;
125 /* current CPU in the current thread. It is only valid inside
127 DEFINE_TLS(CPUArchState
*,cpu_single_env
);
128 /* 0 = Do not count executed instructions.
129 1 = Precise instruction counting.
130 2 = Adaptive rate instruction counting. */
133 typedef struct PageDesc
{
134 /* list of TBs intersecting this ram page */
135 TranslationBlock
*first_tb
;
136 /* in order to optimize self modifying code, we count the number
137 of lookups we do to a given page to use a bitmap */
138 unsigned int code_write_count
;
139 uint8_t *code_bitmap
;
140 #if defined(CONFIG_USER_ONLY)
145 /* In system mode we want L1_MAP to be based on ram offsets,
146 while in user mode we want it to be based on virtual addresses. */
147 #if !defined(CONFIG_USER_ONLY)
148 #if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
149 # define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151 # define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
154 # define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
157 /* Size of the L2 (and L3, etc) page tables. */
159 #define L2_SIZE (1 << L2_BITS)
161 #define P_L2_LEVELS \
162 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
164 /* The bits remaining after N lower levels of page tables. */
165 #define V_L1_BITS_REM \
166 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168 #if V_L1_BITS_REM < 4
169 #define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171 #define V_L1_BITS V_L1_BITS_REM
174 #define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178 uintptr_t qemu_real_host_page_size
;
179 uintptr_t qemu_host_page_size
;
180 uintptr_t qemu_host_page_mask
;
182 /* This is a multi-level map on the virtual address space.
183 The bottom level has pointers to PageDesc. */
184 static void *l1_map
[V_L1_SIZE
];
186 #if !defined(CONFIG_USER_ONLY)
187 typedef struct PhysPageEntry PhysPageEntry
;
189 static MemoryRegionSection
*phys_sections
;
190 static unsigned phys_sections_nb
, phys_sections_nb_alloc
;
191 static uint16_t phys_section_unassigned
;
192 static uint16_t phys_section_notdirty
;
193 static uint16_t phys_section_rom
;
194 static uint16_t phys_section_watch
;
196 struct PhysPageEntry
{
197 uint16_t is_leaf
: 1;
198 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
202 /* Simple allocator for PhysPageEntry nodes */
203 static PhysPageEntry (*phys_map_nodes
)[L2_SIZE
];
204 static unsigned phys_map_nodes_nb
, phys_map_nodes_nb_alloc
;
206 #define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
208 /* This is a multi-level map on the physical address space.
209 The bottom level has pointers to MemoryRegionSections. */
210 static PhysPageEntry phys_map
= { .ptr
= PHYS_MAP_NODE_NIL
, .is_leaf
= 0 };
212 static void io_mem_init(void);
213 static void memory_map_init(void);
215 static MemoryRegion io_mem_watch
;
219 static int tb_flush_count
;
220 static int tb_phys_invalidate_count
;
223 static void map_exec(void *addr
, long size
)
226 VirtualProtect(addr
, size
,
227 PAGE_EXECUTE_READWRITE
, &old_protect
);
/* Mark the host memory range [addr, addr+size) as executable (POSIX build).
 * mprotect() requires page-aligned arguments, so the start is rounded down
 * and the end rounded up to the host page size; the covered pages are left
 * readable and writable as well as executable. */
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
248 static void page_init(void)
250 /* NOTE: we can always suppose that qemu_host_page_size >=
254 SYSTEM_INFO system_info
;
256 GetSystemInfo(&system_info
);
257 qemu_real_host_page_size
= system_info
.dwPageSize
;
260 qemu_real_host_page_size
= getpagesize();
262 if (qemu_host_page_size
== 0)
263 qemu_host_page_size
= qemu_real_host_page_size
;
264 if (qemu_host_page_size
< TARGET_PAGE_SIZE
)
265 qemu_host_page_size
= TARGET_PAGE_SIZE
;
266 qemu_host_page_mask
= ~(qemu_host_page_size
- 1);
268 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
270 #ifdef HAVE_KINFO_GETVMMAP
271 struct kinfo_vmentry
*freep
;
274 freep
= kinfo_getvmmap(getpid(), &cnt
);
277 for (i
= 0; i
< cnt
; i
++) {
278 unsigned long startaddr
, endaddr
;
280 startaddr
= freep
[i
].kve_start
;
281 endaddr
= freep
[i
].kve_end
;
282 if (h2g_valid(startaddr
)) {
283 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
285 if (h2g_valid(endaddr
)) {
286 endaddr
= h2g(endaddr
);
287 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
289 #if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
291 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
302 last_brk
= (unsigned long)sbrk(0);
304 f
= fopen("/compat/linux/proc/self/maps", "r");
309 unsigned long startaddr
, endaddr
;
312 n
= fscanf (f
, "%lx-%lx %*[^\n]\n", &startaddr
, &endaddr
);
314 if (n
== 2 && h2g_valid(startaddr
)) {
315 startaddr
= h2g(startaddr
) & TARGET_PAGE_MASK
;
317 if (h2g_valid(endaddr
)) {
318 endaddr
= h2g(endaddr
);
322 page_set_flags(startaddr
, endaddr
, PAGE_RESERVED
);
334 static PageDesc
*page_find_alloc(tb_page_addr_t index
, int alloc
)
340 #if defined(CONFIG_USER_ONLY)
341 /* We can't use g_malloc because it may recurse into a locked mutex. */
342 # define ALLOC(P, SIZE) \
344 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
345 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
348 # define ALLOC(P, SIZE) \
349 do { P = g_malloc0(SIZE); } while (0)
352 /* Level 1. Always allocated. */
353 lp
= l1_map
+ ((index
>> V_L1_SHIFT
) & (V_L1_SIZE
- 1));
356 for (i
= V_L1_SHIFT
/ L2_BITS
- 1; i
> 0; i
--) {
363 ALLOC(p
, sizeof(void *) * L2_SIZE
);
367 lp
= p
+ ((index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1));
375 ALLOC(pd
, sizeof(PageDesc
) * L2_SIZE
);
381 return pd
+ (index
& (L2_SIZE
- 1));
384 static inline PageDesc
*page_find(tb_page_addr_t index
)
386 return page_find_alloc(index
, 0);
389 #if !defined(CONFIG_USER_ONLY)
391 static void phys_map_node_reserve(unsigned nodes
)
393 if (phys_map_nodes_nb
+ nodes
> phys_map_nodes_nb_alloc
) {
394 typedef PhysPageEntry Node
[L2_SIZE
];
395 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
* 2, 16);
396 phys_map_nodes_nb_alloc
= MAX(phys_map_nodes_nb_alloc
,
397 phys_map_nodes_nb
+ nodes
);
398 phys_map_nodes
= g_renew(Node
, phys_map_nodes
,
399 phys_map_nodes_nb_alloc
);
403 static uint16_t phys_map_node_alloc(void)
408 ret
= phys_map_nodes_nb
++;
409 assert(ret
!= PHYS_MAP_NODE_NIL
);
410 assert(ret
!= phys_map_nodes_nb_alloc
);
411 for (i
= 0; i
< L2_SIZE
; ++i
) {
412 phys_map_nodes
[ret
][i
].is_leaf
= 0;
413 phys_map_nodes
[ret
][i
].ptr
= PHYS_MAP_NODE_NIL
;
418 static void phys_map_nodes_reset(void)
420 phys_map_nodes_nb
= 0;
424 static void phys_page_set_level(PhysPageEntry
*lp
, target_phys_addr_t
*index
,
425 target_phys_addr_t
*nb
, uint16_t leaf
,
430 target_phys_addr_t step
= (target_phys_addr_t
)1 << (level
* L2_BITS
);
432 if (!lp
->is_leaf
&& lp
->ptr
== PHYS_MAP_NODE_NIL
) {
433 lp
->ptr
= phys_map_node_alloc();
434 p
= phys_map_nodes
[lp
->ptr
];
436 for (i
= 0; i
< L2_SIZE
; i
++) {
438 p
[i
].ptr
= phys_section_unassigned
;
442 p
= phys_map_nodes
[lp
->ptr
];
444 lp
= &p
[(*index
>> (level
* L2_BITS
)) & (L2_SIZE
- 1)];
446 while (*nb
&& lp
< &p
[L2_SIZE
]) {
447 if ((*index
& (step
- 1)) == 0 && *nb
>= step
) {
453 phys_page_set_level(lp
, index
, nb
, leaf
, level
- 1);
459 static void phys_page_set(target_phys_addr_t index
, target_phys_addr_t nb
,
462 /* Wildly overreserve - it doesn't matter much. */
463 phys_map_node_reserve(3 * P_L2_LEVELS
);
465 phys_page_set_level(&phys_map
, &index
, &nb
, leaf
, P_L2_LEVELS
- 1);
468 MemoryRegionSection
*phys_page_find(target_phys_addr_t index
)
470 PhysPageEntry lp
= phys_map
;
473 uint16_t s_index
= phys_section_unassigned
;
475 for (i
= P_L2_LEVELS
- 1; i
>= 0 && !lp
.is_leaf
; i
--) {
476 if (lp
.ptr
== PHYS_MAP_NODE_NIL
) {
479 p
= phys_map_nodes
[lp
.ptr
];
480 lp
= p
[(index
>> (i
* L2_BITS
)) & (L2_SIZE
- 1)];
485 return &phys_sections
[s_index
];
488 bool memory_region_is_unassigned(MemoryRegion
*mr
)
490 return mr
!= &io_mem_ram
&& mr
!= &io_mem_rom
491 && mr
!= &io_mem_notdirty
&& !mr
->rom_device
492 && mr
!= &io_mem_watch
;
495 #define mmap_lock() do { } while(0)
496 #define mmap_unlock() do { } while(0)
499 #define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
501 #if defined(CONFIG_USER_ONLY)
502 /* Currently it is not recommended to allocate big chunks of data in
503 user mode. It will change when a dedicated libc will be used */
504 #define USE_STATIC_CODE_GEN_BUFFER
507 #ifdef USE_STATIC_CODE_GEN_BUFFER
508 static uint8_t static_code_gen_buffer
[DEFAULT_CODE_GEN_BUFFER_SIZE
]
509 __attribute__((aligned (CODE_GEN_ALIGN
)));
512 static void code_gen_alloc(unsigned long tb_size
)
514 #ifdef USE_STATIC_CODE_GEN_BUFFER
515 code_gen_buffer
= static_code_gen_buffer
;
516 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
517 map_exec(code_gen_buffer
, code_gen_buffer_size
);
519 code_gen_buffer_size
= tb_size
;
520 if (code_gen_buffer_size
== 0) {
521 #if defined(CONFIG_USER_ONLY)
522 code_gen_buffer_size
= DEFAULT_CODE_GEN_BUFFER_SIZE
;
524 /* XXX: needs adjustments */
525 code_gen_buffer_size
= (unsigned long)(ram_size
/ 4);
528 if (code_gen_buffer_size
< MIN_CODE_GEN_BUFFER_SIZE
)
529 code_gen_buffer_size
= MIN_CODE_GEN_BUFFER_SIZE
;
530 /* The code gen buffer location may have constraints depending on
531 the host cpu and OS */
532 #if defined(__linux__)
537 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
538 #if defined(__x86_64__)
540 /* Cannot map more than that */
541 if (code_gen_buffer_size
> (800 * 1024 * 1024))
542 code_gen_buffer_size
= (800 * 1024 * 1024);
543 #elif defined(__sparc__) && HOST_LONG_BITS == 64
544 // Map the buffer below 2G, so we can use direct calls and branches
545 start
= (void *) 0x40000000UL
;
546 if (code_gen_buffer_size
> (512 * 1024 * 1024))
547 code_gen_buffer_size
= (512 * 1024 * 1024);
548 #elif defined(__arm__)
549 /* Keep the buffer no bigger than 16MB to branch between blocks */
550 if (code_gen_buffer_size
> 16 * 1024 * 1024)
551 code_gen_buffer_size
= 16 * 1024 * 1024;
552 #elif defined(__s390x__)
553 /* Map the buffer so that we can use direct calls and branches. */
554 /* We have a +- 4GB range on the branches; leave some slop. */
555 if (code_gen_buffer_size
> (3ul * 1024 * 1024 * 1024)) {
556 code_gen_buffer_size
= 3ul * 1024 * 1024 * 1024;
558 start
= (void *)0x90000000UL
;
560 code_gen_buffer
= mmap(start
, code_gen_buffer_size
,
561 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
563 if (code_gen_buffer
== MAP_FAILED
) {
564 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
568 #elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
569 || defined(__DragonFly__) || defined(__OpenBSD__) \
570 || defined(__NetBSD__)
574 flags
= MAP_PRIVATE
| MAP_ANONYMOUS
;
575 #if defined(__x86_64__)
576 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
577 * 0x40000000 is free */
579 addr
= (void *)0x40000000;
580 /* Cannot map more than that */
581 if (code_gen_buffer_size
> (800 * 1024 * 1024))
582 code_gen_buffer_size
= (800 * 1024 * 1024);
583 #elif defined(__sparc__) && HOST_LONG_BITS == 64
584 // Map the buffer below 2G, so we can use direct calls and branches
585 addr
= (void *) 0x40000000UL
;
586 if (code_gen_buffer_size
> (512 * 1024 * 1024)) {
587 code_gen_buffer_size
= (512 * 1024 * 1024);
590 code_gen_buffer
= mmap(addr
, code_gen_buffer_size
,
591 PROT_WRITE
| PROT_READ
| PROT_EXEC
,
593 if (code_gen_buffer
== MAP_FAILED
) {
594 fprintf(stderr
, "Could not allocate dynamic translator buffer\n");
599 code_gen_buffer
= g_malloc(code_gen_buffer_size
);
600 map_exec(code_gen_buffer
, code_gen_buffer_size
);
602 #endif /* !USE_STATIC_CODE_GEN_BUFFER */
603 map_exec(code_gen_prologue
, sizeof(code_gen_prologue
));
604 code_gen_buffer_max_size
= code_gen_buffer_size
-
605 (TCG_MAX_OP_SIZE
* OPC_BUF_SIZE
);
606 code_gen_max_blocks
= code_gen_buffer_size
/ CODE_GEN_AVG_BLOCK_SIZE
;
607 tbs
= g_malloc(code_gen_max_blocks
* sizeof(TranslationBlock
));
610 /* Must be called before using the QEMU cpus. 'tb_size' is the size
611 (in bytes) allocated to the translation buffer. Zero means default
613 void tcg_exec_init(unsigned long tb_size
)
616 code_gen_alloc(tb_size
);
617 code_gen_ptr
= code_gen_buffer
;
618 tcg_register_jit(code_gen_buffer
, code_gen_buffer_size
);
620 #if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
621 /* There's no guest base to take into account, so go ahead and
622 initialize the prologue now. */
623 tcg_prologue_init(&tcg_ctx
);
627 bool tcg_enabled(void)
629 return code_gen_buffer
!= NULL
;
632 void cpu_exec_init_all(void)
634 #if !defined(CONFIG_USER_ONLY)
640 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
642 static int cpu_common_post_load(void *opaque
, int version_id
)
644 CPUArchState
*env
= opaque
;
646 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
647 version_id is increased. */
648 env
->interrupt_request
&= ~0x01;
654 static const VMStateDescription vmstate_cpu_common
= {
655 .name
= "cpu_common",
657 .minimum_version_id
= 1,
658 .minimum_version_id_old
= 1,
659 .post_load
= cpu_common_post_load
,
660 .fields
= (VMStateField
[]) {
661 VMSTATE_UINT32(halted
, CPUArchState
),
662 VMSTATE_UINT32(interrupt_request
, CPUArchState
),
663 VMSTATE_END_OF_LIST()
668 CPUArchState
*qemu_get_cpu(int cpu
)
670 CPUArchState
*env
= first_cpu
;
673 if (env
->cpu_index
== cpu
)
681 void cpu_exec_init(CPUArchState
*env
)
686 #if defined(CONFIG_USER_ONLY)
689 env
->next_cpu
= NULL
;
692 while (*penv
!= NULL
) {
693 penv
= &(*penv
)->next_cpu
;
696 env
->cpu_index
= cpu_index
;
698 QTAILQ_INIT(&env
->breakpoints
);
699 QTAILQ_INIT(&env
->watchpoints
);
700 #ifndef CONFIG_USER_ONLY
701 env
->thread_id
= qemu_get_thread_id();
704 #if defined(CONFIG_USER_ONLY)
707 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
708 vmstate_register(NULL
, cpu_index
, &vmstate_cpu_common
, env
);
709 register_savevm(NULL
, "cpu", cpu_index
, CPU_SAVE_VERSION
,
710 cpu_save
, cpu_load
, env
);
714 /* Allocate a new translation block. Flush the translation buffer if
715 too many translation blocks or too much generated code. */
716 static TranslationBlock
*tb_alloc(target_ulong pc
)
718 TranslationBlock
*tb
;
720 if (nb_tbs
>= code_gen_max_blocks
||
721 (code_gen_ptr
- code_gen_buffer
) >= code_gen_buffer_max_size
)
729 void tb_free(TranslationBlock
*tb
)
731 /* In practice this is mostly used for single use temporary TB
732 Ignore the hard cases and just back up if this TB happens to
733 be the last one generated. */
734 if (nb_tbs
> 0 && tb
== &tbs
[nb_tbs
- 1]) {
735 code_gen_ptr
= tb
->tc_ptr
;
740 static inline void invalidate_page_bitmap(PageDesc
*p
)
742 if (p
->code_bitmap
) {
743 g_free(p
->code_bitmap
);
744 p
->code_bitmap
= NULL
;
746 p
->code_write_count
= 0;
749 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
751 static void page_flush_tb_1 (int level
, void **lp
)
760 for (i
= 0; i
< L2_SIZE
; ++i
) {
761 pd
[i
].first_tb
= NULL
;
762 invalidate_page_bitmap(pd
+ i
);
766 for (i
= 0; i
< L2_SIZE
; ++i
) {
767 page_flush_tb_1 (level
- 1, pp
+ i
);
772 static void page_flush_tb(void)
775 for (i
= 0; i
< V_L1_SIZE
; i
++) {
776 page_flush_tb_1(V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
780 /* flush all the translation blocks */
781 /* XXX: tb_flush is currently not thread safe */
782 void tb_flush(CPUArchState
*env1
)
785 #if defined(DEBUG_FLUSH)
786 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
787 (unsigned long)(code_gen_ptr
- code_gen_buffer
),
789 ((unsigned long)(code_gen_ptr
- code_gen_buffer
)) / nb_tbs
: 0);
791 if ((unsigned long)(code_gen_ptr
- code_gen_buffer
) > code_gen_buffer_size
)
792 cpu_abort(env1
, "Internal error: code buffer overflow\n");
796 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
797 memset (env
->tb_jmp_cache
, 0, TB_JMP_CACHE_SIZE
* sizeof (void *));
800 memset (tb_phys_hash
, 0, CODE_GEN_PHYS_HASH_SIZE
* sizeof (void *));
803 code_gen_ptr
= code_gen_buffer
;
804 /* XXX: flush processor icache at this point if cache flush is
809 #ifdef DEBUG_TB_CHECK
811 static void tb_invalidate_check(target_ulong address
)
813 TranslationBlock
*tb
;
815 address
&= TARGET_PAGE_MASK
;
816 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
817 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
818 if (!(address
+ TARGET_PAGE_SIZE
<= tb
->pc
||
819 address
>= tb
->pc
+ tb
->size
)) {
820 printf("ERROR invalidate: address=" TARGET_FMT_lx
821 " PC=%08lx size=%04x\n",
822 address
, (long)tb
->pc
, tb
->size
);
828 /* verify that all the pages have correct rights for code */
829 static void tb_page_check(void)
831 TranslationBlock
*tb
;
832 int i
, flags1
, flags2
;
834 for(i
= 0;i
< CODE_GEN_PHYS_HASH_SIZE
; i
++) {
835 for(tb
= tb_phys_hash
[i
]; tb
!= NULL
; tb
= tb
->phys_hash_next
) {
836 flags1
= page_get_flags(tb
->pc
);
837 flags2
= page_get_flags(tb
->pc
+ tb
->size
- 1);
838 if ((flags1
& PAGE_WRITE
) || (flags2
& PAGE_WRITE
)) {
839 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
840 (long)tb
->pc
, tb
->size
, flags1
, flags2
);
848 /* invalidate one TB */
849 static inline void tb_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
,
852 TranslationBlock
*tb1
;
856 *ptb
= *(TranslationBlock
**)((char *)tb1
+ next_offset
);
859 ptb
= (TranslationBlock
**)((char *)tb1
+ next_offset
);
863 static inline void tb_page_remove(TranslationBlock
**ptb
, TranslationBlock
*tb
)
865 TranslationBlock
*tb1
;
870 n1
= (uintptr_t)tb1
& 3;
871 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
873 *ptb
= tb1
->page_next
[n1
];
876 ptb
= &tb1
->page_next
[n1
];
880 static inline void tb_jmp_remove(TranslationBlock
*tb
, int n
)
882 TranslationBlock
*tb1
, **ptb
;
885 ptb
= &tb
->jmp_next
[n
];
888 /* find tb(n) in circular list */
891 n1
= (uintptr_t)tb1
& 3;
892 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
893 if (n1
== n
&& tb1
== tb
)
896 ptb
= &tb1
->jmp_first
;
898 ptb
= &tb1
->jmp_next
[n1
];
901 /* now we can suppress tb(n) from the list */
902 *ptb
= tb
->jmp_next
[n
];
904 tb
->jmp_next
[n
] = NULL
;
908 /* reset the jump entry 'n' of a TB so that it is not chained to
910 static inline void tb_reset_jump(TranslationBlock
*tb
, int n
)
912 tb_set_jmp_target(tb
, n
, (uintptr_t)(tb
->tc_ptr
+ tb
->tb_next_offset
[n
]));
915 void tb_phys_invalidate(TranslationBlock
*tb
, tb_page_addr_t page_addr
)
920 tb_page_addr_t phys_pc
;
921 TranslationBlock
*tb1
, *tb2
;
923 /* remove the TB from the hash list */
924 phys_pc
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
925 h
= tb_phys_hash_func(phys_pc
);
926 tb_remove(&tb_phys_hash
[h
], tb
,
927 offsetof(TranslationBlock
, phys_hash_next
));
929 /* remove the TB from the page list */
930 if (tb
->page_addr
[0] != page_addr
) {
931 p
= page_find(tb
->page_addr
[0] >> TARGET_PAGE_BITS
);
932 tb_page_remove(&p
->first_tb
, tb
);
933 invalidate_page_bitmap(p
);
935 if (tb
->page_addr
[1] != -1 && tb
->page_addr
[1] != page_addr
) {
936 p
= page_find(tb
->page_addr
[1] >> TARGET_PAGE_BITS
);
937 tb_page_remove(&p
->first_tb
, tb
);
938 invalidate_page_bitmap(p
);
941 tb_invalidated_flag
= 1;
943 /* remove the TB from the hash list */
944 h
= tb_jmp_cache_hash_func(tb
->pc
);
945 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
946 if (env
->tb_jmp_cache
[h
] == tb
)
947 env
->tb_jmp_cache
[h
] = NULL
;
950 /* suppress this TB from the two jump lists */
951 tb_jmp_remove(tb
, 0);
952 tb_jmp_remove(tb
, 1);
954 /* suppress any remaining jumps to this TB */
957 n1
= (uintptr_t)tb1
& 3;
960 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
961 tb2
= tb1
->jmp_next
[n1
];
962 tb_reset_jump(tb1
, n1
);
963 tb1
->jmp_next
[n1
] = NULL
;
966 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2); /* fail safe */
968 tb_phys_invalidate_count
++;
971 static inline void set_bits(uint8_t *tab
, int start
, int len
)
977 mask
= 0xff << (start
& 7);
978 if ((start
& ~7) == (end
& ~7)) {
980 mask
&= ~(0xff << (end
& 7));
985 start
= (start
+ 8) & ~7;
987 while (start
< end1
) {
992 mask
= ~(0xff << (end
& 7));
998 static void build_page_bitmap(PageDesc
*p
)
1000 int n
, tb_start
, tb_end
;
1001 TranslationBlock
*tb
;
1003 p
->code_bitmap
= g_malloc0(TARGET_PAGE_SIZE
/ 8);
1006 while (tb
!= NULL
) {
1007 n
= (uintptr_t)tb
& 3;
1008 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1009 /* NOTE: this is subtle as a TB may span two physical pages */
1011 /* NOTE: tb_end may be after the end of the page, but
1012 it is not a problem */
1013 tb_start
= tb
->pc
& ~TARGET_PAGE_MASK
;
1014 tb_end
= tb_start
+ tb
->size
;
1015 if (tb_end
> TARGET_PAGE_SIZE
)
1016 tb_end
= TARGET_PAGE_SIZE
;
1019 tb_end
= ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1021 set_bits(p
->code_bitmap
, tb_start
, tb_end
- tb_start
);
1022 tb
= tb
->page_next
[n
];
1026 TranslationBlock
*tb_gen_code(CPUArchState
*env
,
1027 target_ulong pc
, target_ulong cs_base
,
1028 int flags
, int cflags
)
1030 TranslationBlock
*tb
;
1032 tb_page_addr_t phys_pc
, phys_page2
;
1033 target_ulong virt_page2
;
1036 phys_pc
= get_page_addr_code(env
, pc
);
1039 /* flush must be done */
1041 /* cannot fail at this point */
1043 /* Don't forget to invalidate previous TB info. */
1044 tb_invalidated_flag
= 1;
1046 tc_ptr
= code_gen_ptr
;
1047 tb
->tc_ptr
= tc_ptr
;
1048 tb
->cs_base
= cs_base
;
1050 tb
->cflags
= cflags
;
1051 cpu_gen_code(env
, tb
, &code_gen_size
);
1052 code_gen_ptr
= (void *)(((uintptr_t)code_gen_ptr
+ code_gen_size
+
1053 CODE_GEN_ALIGN
- 1) & ~(CODE_GEN_ALIGN
- 1));
1055 /* check next page if needed */
1056 virt_page2
= (pc
+ tb
->size
- 1) & TARGET_PAGE_MASK
;
1058 if ((pc
& TARGET_PAGE_MASK
) != virt_page2
) {
1059 phys_page2
= get_page_addr_code(env
, virt_page2
);
1061 tb_link_page(tb
, phys_pc
, phys_page2
);
1066 * Invalidate all TBs which intersect with the target physical address range
1067 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1068 * 'is_cpu_write_access' should be true if called from a real cpu write
1069 * access: the virtual CPU will exit the current TB if code is modified inside
1072 void tb_invalidate_phys_range(tb_page_addr_t start
, tb_page_addr_t end
,
1073 int is_cpu_write_access
)
1075 while (start
< end
) {
1076 tb_invalidate_phys_page_range(start
, end
, is_cpu_write_access
);
1077 start
&= TARGET_PAGE_MASK
;
1078 start
+= TARGET_PAGE_SIZE
;
1083 * Invalidate all TBs which intersect with the target physical address range
1084 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1085 * 'is_cpu_write_access' should be true if called from a real cpu write
1086 * access: the virtual CPU will exit the current TB if code is modified inside
1089 void tb_invalidate_phys_page_range(tb_page_addr_t start
, tb_page_addr_t end
,
1090 int is_cpu_write_access
)
1092 TranslationBlock
*tb
, *tb_next
, *saved_tb
;
1093 CPUArchState
*env
= cpu_single_env
;
1094 tb_page_addr_t tb_start
, tb_end
;
1097 #ifdef TARGET_HAS_PRECISE_SMC
1098 int current_tb_not_found
= is_cpu_write_access
;
1099 TranslationBlock
*current_tb
= NULL
;
1100 int current_tb_modified
= 0;
1101 target_ulong current_pc
= 0;
1102 target_ulong current_cs_base
= 0;
1103 int current_flags
= 0;
1104 #endif /* TARGET_HAS_PRECISE_SMC */
1106 p
= page_find(start
>> TARGET_PAGE_BITS
);
1109 if (!p
->code_bitmap
&&
1110 ++p
->code_write_count
>= SMC_BITMAP_USE_THRESHOLD
&&
1111 is_cpu_write_access
) {
1112 /* build code bitmap */
1113 build_page_bitmap(p
);
1116 /* we remove all the TBs in the range [start, end[ */
1117 /* XXX: see if in some cases it could be faster to invalidate all the code */
1119 while (tb
!= NULL
) {
1120 n
= (uintptr_t)tb
& 3;
1121 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1122 tb_next
= tb
->page_next
[n
];
1123 /* NOTE: this is subtle as a TB may span two physical pages */
1125 /* NOTE: tb_end may be after the end of the page, but
1126 it is not a problem */
1127 tb_start
= tb
->page_addr
[0] + (tb
->pc
& ~TARGET_PAGE_MASK
);
1128 tb_end
= tb_start
+ tb
->size
;
1130 tb_start
= tb
->page_addr
[1];
1131 tb_end
= tb_start
+ ((tb
->pc
+ tb
->size
) & ~TARGET_PAGE_MASK
);
1133 if (!(tb_end
<= start
|| tb_start
>= end
)) {
1134 #ifdef TARGET_HAS_PRECISE_SMC
1135 if (current_tb_not_found
) {
1136 current_tb_not_found
= 0;
1138 if (env
->mem_io_pc
) {
1139 /* now we have a real cpu fault */
1140 current_tb
= tb_find_pc(env
->mem_io_pc
);
1143 if (current_tb
== tb
&&
1144 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1145 /* If we are modifying the current TB, we must stop
1146 its execution. We could be more precise by checking
1147 that the modification is after the current PC, but it
1148 would require a specialized function to partially
1149 restore the CPU state */
1151 current_tb_modified
= 1;
1152 cpu_restore_state(current_tb
, env
, env
->mem_io_pc
);
1153 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1156 #endif /* TARGET_HAS_PRECISE_SMC */
1157 /* we need to do that to handle the case where a signal
1158 occurs while doing tb_phys_invalidate() */
1161 saved_tb
= env
->current_tb
;
1162 env
->current_tb
= NULL
;
1164 tb_phys_invalidate(tb
, -1);
1166 env
->current_tb
= saved_tb
;
1167 if (env
->interrupt_request
&& env
->current_tb
)
1168 cpu_interrupt(env
, env
->interrupt_request
);
1173 #if !defined(CONFIG_USER_ONLY)
1174 /* if no code remaining, no need to continue to use slow writes */
1176 invalidate_page_bitmap(p
);
1177 if (is_cpu_write_access
) {
1178 tlb_unprotect_code_phys(env
, start
, env
->mem_io_vaddr
);
1182 #ifdef TARGET_HAS_PRECISE_SMC
1183 if (current_tb_modified
) {
1184 /* we generate a block containing just the instruction
1185 modifying the memory. It will ensure that it cannot modify
1187 env
->current_tb
= NULL
;
1188 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1189 cpu_resume_from_signal(env
, NULL
);
1194 /* len must be <= 8 and start must be a multiple of len */
1195 static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start
, int len
)
1201 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1202 cpu_single_env
->mem_io_vaddr
, len
,
1203 cpu_single_env
->eip
,
1204 cpu_single_env
->eip
+
1205 (intptr_t)cpu_single_env
->segs
[R_CS
].base
);
1208 p
= page_find(start
>> TARGET_PAGE_BITS
);
1211 if (p
->code_bitmap
) {
1212 offset
= start
& ~TARGET_PAGE_MASK
;
1213 b
= p
->code_bitmap
[offset
>> 3] >> (offset
& 7);
1214 if (b
& ((1 << len
) - 1))
1218 tb_invalidate_phys_page_range(start
, start
+ len
, 1);
1222 #if !defined(CONFIG_SOFTMMU)
1223 static void tb_invalidate_phys_page(tb_page_addr_t addr
,
1224 uintptr_t pc
, void *puc
)
1226 TranslationBlock
*tb
;
1229 #ifdef TARGET_HAS_PRECISE_SMC
1230 TranslationBlock
*current_tb
= NULL
;
1231 CPUArchState
*env
= cpu_single_env
;
1232 int current_tb_modified
= 0;
1233 target_ulong current_pc
= 0;
1234 target_ulong current_cs_base
= 0;
1235 int current_flags
= 0;
1238 addr
&= TARGET_PAGE_MASK
;
1239 p
= page_find(addr
>> TARGET_PAGE_BITS
);
1243 #ifdef TARGET_HAS_PRECISE_SMC
1244 if (tb
&& pc
!= 0) {
1245 current_tb
= tb_find_pc(pc
);
1248 while (tb
!= NULL
) {
1249 n
= (uintptr_t)tb
& 3;
1250 tb
= (TranslationBlock
*)((uintptr_t)tb
& ~3);
1251 #ifdef TARGET_HAS_PRECISE_SMC
1252 if (current_tb
== tb
&&
1253 (current_tb
->cflags
& CF_COUNT_MASK
) != 1) {
1254 /* If we are modifying the current TB, we must stop
1255 its execution. We could be more precise by checking
1256 that the modification is after the current PC, but it
1257 would require a specialized function to partially
1258 restore the CPU state */
1260 current_tb_modified
= 1;
1261 cpu_restore_state(current_tb
, env
, pc
);
1262 cpu_get_tb_cpu_state(env
, ¤t_pc
, ¤t_cs_base
,
1265 #endif /* TARGET_HAS_PRECISE_SMC */
1266 tb_phys_invalidate(tb
, addr
);
1267 tb
= tb
->page_next
[n
];
1270 #ifdef TARGET_HAS_PRECISE_SMC
1271 if (current_tb_modified
) {
1272 /* we generate a block containing just the instruction
1273 modifying the memory. It will ensure that it cannot modify
1275 env
->current_tb
= NULL
;
1276 tb_gen_code(env
, current_pc
, current_cs_base
, current_flags
, 1);
1277 cpu_resume_from_signal(env
, puc
);
1283 /* add the tb in the target page and protect it if necessary */
1284 static inline void tb_alloc_page(TranslationBlock
*tb
,
1285 unsigned int n
, tb_page_addr_t page_addr
)
1288 #ifndef CONFIG_USER_ONLY
1289 bool page_already_protected
;
1292 tb
->page_addr
[n
] = page_addr
;
1293 p
= page_find_alloc(page_addr
>> TARGET_PAGE_BITS
, 1);
1294 tb
->page_next
[n
] = p
->first_tb
;
1295 #ifndef CONFIG_USER_ONLY
1296 page_already_protected
= p
->first_tb
!= NULL
;
1298 p
->first_tb
= (TranslationBlock
*)((uintptr_t)tb
| n
);
1299 invalidate_page_bitmap(p
);
1301 #if defined(TARGET_HAS_SMC) || 1
1303 #if defined(CONFIG_USER_ONLY)
1304 if (p
->flags
& PAGE_WRITE
) {
1309 /* force the host page as non writable (writes will have a
1310 page fault + mprotect overhead) */
1311 page_addr
&= qemu_host_page_mask
;
1313 for(addr
= page_addr
; addr
< page_addr
+ qemu_host_page_size
;
1314 addr
+= TARGET_PAGE_SIZE
) {
1316 p2
= page_find (addr
>> TARGET_PAGE_BITS
);
1320 p2
->flags
&= ~PAGE_WRITE
;
1322 mprotect(g2h(page_addr
), qemu_host_page_size
,
1323 (prot
& PAGE_BITS
) & ~PAGE_WRITE
);
1324 #ifdef DEBUG_TB_INVALIDATE
1325 printf("protecting code page: 0x" TARGET_FMT_lx
"\n",
1330 /* if some code is already present, then the pages are already
1331 protected. So we handle the case where only the first TB is
1332 allocated in a physical page */
1333 if (!page_already_protected
) {
1334 tlb_protect_code(page_addr
);
1338 #endif /* TARGET_HAS_SMC */
1341 /* add a new TB and link it to the physical page tables. phys_page2 is
1342 (-1) to indicate that only one page contains the TB. */
1343 void tb_link_page(TranslationBlock
*tb
,
1344 tb_page_addr_t phys_pc
, tb_page_addr_t phys_page2
)
1347 TranslationBlock
**ptb
;
1349 /* Grab the mmap lock to stop another thread invalidating this TB
1350 before we are done. */
1352 /* add in the physical hash table */
1353 h
= tb_phys_hash_func(phys_pc
);
1354 ptb
= &tb_phys_hash
[h
];
1355 tb
->phys_hash_next
= *ptb
;
1358 /* add in the page list */
1359 tb_alloc_page(tb
, 0, phys_pc
& TARGET_PAGE_MASK
);
1360 if (phys_page2
!= -1)
1361 tb_alloc_page(tb
, 1, phys_page2
);
1363 tb
->page_addr
[1] = -1;
1365 tb
->jmp_first
= (TranslationBlock
*)((uintptr_t)tb
| 2);
1366 tb
->jmp_next
[0] = NULL
;
1367 tb
->jmp_next
[1] = NULL
;
1369 /* init original jump addresses */
1370 if (tb
->tb_next_offset
[0] != 0xffff)
1371 tb_reset_jump(tb
, 0);
1372 if (tb
->tb_next_offset
[1] != 0xffff)
1373 tb_reset_jump(tb
, 1);
1375 #ifdef DEBUG_TB_CHECK
1381 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1382 tb[1].tc_ptr. Return NULL if not found */
1383 TranslationBlock
*tb_find_pc(uintptr_t tc_ptr
)
1385 int m_min
, m_max
, m
;
1387 TranslationBlock
*tb
;
1391 if (tc_ptr
< (uintptr_t)code_gen_buffer
||
1392 tc_ptr
>= (uintptr_t)code_gen_ptr
) {
1395 /* binary search (cf Knuth) */
1398 while (m_min
<= m_max
) {
1399 m
= (m_min
+ m_max
) >> 1;
1401 v
= (uintptr_t)tb
->tc_ptr
;
1404 else if (tc_ptr
< v
) {
1413 static void tb_reset_jump_recursive(TranslationBlock
*tb
);
1415 static inline void tb_reset_jump_recursive2(TranslationBlock
*tb
, int n
)
1417 TranslationBlock
*tb1
, *tb_next
, **ptb
;
1420 tb1
= tb
->jmp_next
[n
];
1422 /* find head of list */
1424 n1
= (uintptr_t)tb1
& 3;
1425 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1428 tb1
= tb1
->jmp_next
[n1
];
1430 /* we are now sure now that tb jumps to tb1 */
1433 /* remove tb from the jmp_first list */
1434 ptb
= &tb_next
->jmp_first
;
1437 n1
= (uintptr_t)tb1
& 3;
1438 tb1
= (TranslationBlock
*)((uintptr_t)tb1
& ~3);
1439 if (n1
== n
&& tb1
== tb
)
1441 ptb
= &tb1
->jmp_next
[n1
];
1443 *ptb
= tb
->jmp_next
[n
];
1444 tb
->jmp_next
[n
] = NULL
;
1446 /* suppress the jump to next tb in generated code */
1447 tb_reset_jump(tb
, n
);
1449 /* suppress jumps in the tb on which we could have jumped */
1450 tb_reset_jump_recursive(tb_next
);
1454 static void tb_reset_jump_recursive(TranslationBlock
*tb
)
1456 tb_reset_jump_recursive2(tb
, 0);
1457 tb_reset_jump_recursive2(tb
, 1);
1460 #if defined(TARGET_HAS_ICE)
1461 #if defined(CONFIG_USER_ONLY)
1462 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1464 tb_invalidate_phys_page_range(pc
, pc
+ 1, 0);
1467 void tb_invalidate_phys_addr(target_phys_addr_t addr
)
1469 ram_addr_t ram_addr
;
1470 MemoryRegionSection
*section
;
1472 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
1473 if (!(memory_region_is_ram(section
->mr
)
1474 || (section
->mr
->rom_device
&& section
->mr
->readable
))) {
1477 ram_addr
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1478 + memory_region_section_addr(section
, addr
);
1479 tb_invalidate_phys_page_range(ram_addr
, ram_addr
+ 1, 0);
1482 static void breakpoint_invalidate(CPUArchState
*env
, target_ulong pc
)
1484 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env
, pc
) |
1485 (pc
& ~TARGET_PAGE_MASK
));
1488 #endif /* TARGET_HAS_ICE */
1490 #if defined(CONFIG_USER_ONLY)
1491 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1496 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1497 int flags
, CPUWatchpoint
**watchpoint
)
1502 /* Add a watchpoint. */
1503 int cpu_watchpoint_insert(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1504 int flags
, CPUWatchpoint
**watchpoint
)
1506 target_ulong len_mask
= ~(len
- 1);
1509 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1510 if ((len
& (len
- 1)) || (addr
& ~len_mask
) ||
1511 len
== 0 || len
> TARGET_PAGE_SIZE
) {
1512 fprintf(stderr
, "qemu: tried to set invalid watchpoint at "
1513 TARGET_FMT_lx
", len=" TARGET_FMT_lu
"\n", addr
, len
);
1516 wp
= g_malloc(sizeof(*wp
));
1519 wp
->len_mask
= len_mask
;
1522 /* keep all GDB-injected watchpoints in front */
1524 QTAILQ_INSERT_HEAD(&env
->watchpoints
, wp
, entry
);
1526 QTAILQ_INSERT_TAIL(&env
->watchpoints
, wp
, entry
);
1528 tlb_flush_page(env
, addr
);
1535 /* Remove a specific watchpoint. */
1536 int cpu_watchpoint_remove(CPUArchState
*env
, target_ulong addr
, target_ulong len
,
1539 target_ulong len_mask
= ~(len
- 1);
1542 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1543 if (addr
== wp
->vaddr
&& len_mask
== wp
->len_mask
1544 && flags
== (wp
->flags
& ~BP_WATCHPOINT_HIT
)) {
1545 cpu_watchpoint_remove_by_ref(env
, wp
);
1552 /* Remove a specific watchpoint by reference. */
1553 void cpu_watchpoint_remove_by_ref(CPUArchState
*env
, CPUWatchpoint
*watchpoint
)
1555 QTAILQ_REMOVE(&env
->watchpoints
, watchpoint
, entry
);
1557 tlb_flush_page(env
, watchpoint
->vaddr
);
1562 /* Remove all matching watchpoints. */
1563 void cpu_watchpoint_remove_all(CPUArchState
*env
, int mask
)
1565 CPUWatchpoint
*wp
, *next
;
1567 QTAILQ_FOREACH_SAFE(wp
, &env
->watchpoints
, entry
, next
) {
1568 if (wp
->flags
& mask
)
1569 cpu_watchpoint_remove_by_ref(env
, wp
);
1574 /* Add a breakpoint. */
1575 int cpu_breakpoint_insert(CPUArchState
*env
, target_ulong pc
, int flags
,
1576 CPUBreakpoint
**breakpoint
)
1578 #if defined(TARGET_HAS_ICE)
1581 bp
= g_malloc(sizeof(*bp
));
1586 /* keep all GDB-injected breakpoints in front */
1588 QTAILQ_INSERT_HEAD(&env
->breakpoints
, bp
, entry
);
1590 QTAILQ_INSERT_TAIL(&env
->breakpoints
, bp
, entry
);
1592 breakpoint_invalidate(env
, pc
);
1602 /* Remove a specific breakpoint. */
1603 int cpu_breakpoint_remove(CPUArchState
*env
, target_ulong pc
, int flags
)
1605 #if defined(TARGET_HAS_ICE)
1608 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1609 if (bp
->pc
== pc
&& bp
->flags
== flags
) {
1610 cpu_breakpoint_remove_by_ref(env
, bp
);
1620 /* Remove a specific breakpoint by reference. */
1621 void cpu_breakpoint_remove_by_ref(CPUArchState
*env
, CPUBreakpoint
*breakpoint
)
1623 #if defined(TARGET_HAS_ICE)
1624 QTAILQ_REMOVE(&env
->breakpoints
, breakpoint
, entry
);
1626 breakpoint_invalidate(env
, breakpoint
->pc
);
1632 /* Remove all matching breakpoints. */
1633 void cpu_breakpoint_remove_all(CPUArchState
*env
, int mask
)
1635 #if defined(TARGET_HAS_ICE)
1636 CPUBreakpoint
*bp
, *next
;
1638 QTAILQ_FOREACH_SAFE(bp
, &env
->breakpoints
, entry
, next
) {
1639 if (bp
->flags
& mask
)
1640 cpu_breakpoint_remove_by_ref(env
, bp
);
1645 /* enable or disable single step mode. EXCP_DEBUG is returned by the
1646 CPU loop after each instruction */
1647 void cpu_single_step(CPUArchState
*env
, int enabled
)
1649 #if defined(TARGET_HAS_ICE)
1650 if (env
->singlestep_enabled
!= enabled
) {
1651 env
->singlestep_enabled
= enabled
;
1653 kvm_update_guest_debug(env
, 0);
1655 /* must flush all the translated code to avoid inconsistencies */
1656 /* XXX: only flush what is necessary */
1663 static void cpu_unlink_tb(CPUArchState
*env
)
1665 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1666 problem and hope the cpu will stop of its own accord. For userspace
1667 emulation this often isn't actually as bad as it sounds. Often
1668 signals are used primarily to interrupt blocking syscalls. */
1669 TranslationBlock
*tb
;
1670 static spinlock_t interrupt_lock
= SPIN_LOCK_UNLOCKED
;
1672 spin_lock(&interrupt_lock
);
1673 tb
= env
->current_tb
;
1674 /* if the cpu is currently executing code, we must unlink it and
1675 all the potentially executing TB */
1677 env
->current_tb
= NULL
;
1678 tb_reset_jump_recursive(tb
);
1680 spin_unlock(&interrupt_lock
);
1683 #ifndef CONFIG_USER_ONLY
1684 /* mask must never be zero, except for A20 change call */
1685 static void tcg_handle_interrupt(CPUArchState
*env
, int mask
)
1689 old_mask
= env
->interrupt_request
;
1690 env
->interrupt_request
|= mask
;
1693 * If called from iothread context, wake the target cpu in
1696 if (!qemu_cpu_is_self(env
)) {
1702 env
->icount_decr
.u16
.high
= 0xffff;
1704 && (mask
& ~old_mask
) != 0) {
1705 cpu_abort(env
, "Raised interrupt while not in I/O function");
1712 CPUInterruptHandler cpu_interrupt_handler
= tcg_handle_interrupt
;
1714 #else /* CONFIG_USER_ONLY */
1716 void cpu_interrupt(CPUArchState
*env
, int mask
)
1718 env
->interrupt_request
|= mask
;
1721 #endif /* CONFIG_USER_ONLY */
1723 void cpu_reset_interrupt(CPUArchState
*env
, int mask
)
1725 env
->interrupt_request
&= ~mask
;
1728 void cpu_exit(CPUArchState
*env
)
1730 env
->exit_request
= 1;
1734 void cpu_abort(CPUArchState
*env
, const char *fmt
, ...)
1741 fprintf(stderr
, "qemu: fatal: ");
1742 vfprintf(stderr
, fmt
, ap
);
1743 fprintf(stderr
, "\n");
1744 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1745 if (qemu_log_enabled()) {
1746 qemu_log("qemu: fatal: ");
1747 qemu_log_vprintf(fmt
, ap2
);
1749 log_cpu_state(env
, CPU_DUMP_FPU
| CPU_DUMP_CCOP
);
1755 #if defined(CONFIG_USER_ONLY)
1757 struct sigaction act
;
1758 sigfillset(&act
.sa_mask
);
1759 act
.sa_handler
= SIG_DFL
;
1760 sigaction(SIGABRT
, &act
, NULL
);
1766 CPUArchState
*cpu_copy(CPUArchState
*env
)
1768 CPUArchState
*new_env
= cpu_init(env
->cpu_model_str
);
1769 CPUArchState
*next_cpu
= new_env
->next_cpu
;
1770 int cpu_index
= new_env
->cpu_index
;
1771 #if defined(TARGET_HAS_ICE)
1776 memcpy(new_env
, env
, sizeof(CPUArchState
));
1778 /* Preserve chaining and index. */
1779 new_env
->next_cpu
= next_cpu
;
1780 new_env
->cpu_index
= cpu_index
;
1782 /* Clone all break/watchpoints.
1783 Note: Once we support ptrace with hw-debug register access, make sure
1784 BP_CPU break/watchpoints are handled correctly on clone. */
1785 QTAILQ_INIT(&env
->breakpoints
);
1786 QTAILQ_INIT(&env
->watchpoints
);
1787 #if defined(TARGET_HAS_ICE)
1788 QTAILQ_FOREACH(bp
, &env
->breakpoints
, entry
) {
1789 cpu_breakpoint_insert(new_env
, bp
->pc
, bp
->flags
, NULL
);
1791 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1792 cpu_watchpoint_insert(new_env
, wp
->vaddr
, (~wp
->len_mask
) + 1,
1800 #if !defined(CONFIG_USER_ONLY)
1801 void tb_flush_jmp_cache(CPUArchState
*env
, target_ulong addr
)
1805 /* Discard jump cache entries for any tb which might potentially
1806 overlap the flushed page. */
1807 i
= tb_jmp_cache_hash_page(addr
- TARGET_PAGE_SIZE
);
1808 memset (&env
->tb_jmp_cache
[i
], 0,
1809 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1811 i
= tb_jmp_cache_hash_page(addr
);
1812 memset (&env
->tb_jmp_cache
[i
], 0,
1813 TB_JMP_PAGE_SIZE
* sizeof(TranslationBlock
*));
1816 static void tlb_reset_dirty_range_all(ram_addr_t start
, ram_addr_t end
,
1821 /* we modify the TLB cache so that the dirty bit will be set again
1822 when accessing the range */
1823 start1
= (uintptr_t)qemu_safe_ram_ptr(start
);
1824 /* Check that we don't span multiple blocks - this breaks the
1825 address comparisons below. */
1826 if ((uintptr_t)qemu_safe_ram_ptr(end
- 1) - start1
1827 != (end
- 1) - start
) {
1830 cpu_tlb_reset_dirty_all(start1
, length
);
1834 /* Note: start and end must be within the same ram block. */
1835 void cpu_physical_memory_reset_dirty(ram_addr_t start
, ram_addr_t end
,
1840 start
&= TARGET_PAGE_MASK
;
1841 end
= TARGET_PAGE_ALIGN(end
);
1843 length
= end
- start
;
1846 cpu_physical_memory_mask_dirty_range(start
, length
, dirty_flags
);
1848 if (tcg_enabled()) {
1849 tlb_reset_dirty_range_all(start
, end
, length
);
1853 int cpu_physical_memory_set_dirty_tracking(int enable
)
1856 in_migration
= enable
;
1860 target_phys_addr_t
memory_region_section_get_iotlb(CPUArchState
*env
,
1861 MemoryRegionSection
*section
,
1863 target_phys_addr_t paddr
,
1865 target_ulong
*address
)
1867 target_phys_addr_t iotlb
;
1870 if (memory_region_is_ram(section
->mr
)) {
1872 iotlb
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
1873 + memory_region_section_addr(section
, paddr
);
1874 if (!section
->readonly
) {
1875 iotlb
|= phys_section_notdirty
;
1877 iotlb
|= phys_section_rom
;
1880 /* IO handlers are currently passed a physical address.
1881 It would be nice to pass an offset from the base address
1882 of that region. This would avoid having to special case RAM,
1883 and avoid full address decoding in every device.
1884 We can't use the high bits of pd for this because
1885 IO_MEM_ROMD uses these as a ram address. */
1886 iotlb
= section
- phys_sections
;
1887 iotlb
+= memory_region_section_addr(section
, paddr
);
1890 /* Make accesses to pages with watchpoints go via the
1891 watchpoint trap routines. */
1892 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
1893 if (vaddr
== (wp
->vaddr
& TARGET_PAGE_MASK
)) {
1894 /* Avoid trapping reads of pages with a write breakpoint. */
1895 if ((prot
& PAGE_WRITE
) || (wp
->flags
& BP_MEM_READ
)) {
1896 iotlb
= phys_section_watch
+ paddr
;
1897 *address
|= TLB_MMIO
;
1908 * Walks guest process memory "regions" one by one
1909 * and calls callback function 'fn' for each region.
1912 struct walk_memory_regions_data
1914 walk_memory_regions_fn fn
;
1920 static int walk_memory_regions_end(struct walk_memory_regions_data
*data
,
1921 abi_ulong end
, int new_prot
)
1923 if (data
->start
!= -1ul) {
1924 int rc
= data
->fn(data
->priv
, data
->start
, end
, data
->prot
);
1930 data
->start
= (new_prot
? end
: -1ul);
1931 data
->prot
= new_prot
;
1936 static int walk_memory_regions_1(struct walk_memory_regions_data
*data
,
1937 abi_ulong base
, int level
, void **lp
)
1943 return walk_memory_regions_end(data
, base
, 0);
1948 for (i
= 0; i
< L2_SIZE
; ++i
) {
1949 int prot
= pd
[i
].flags
;
1951 pa
= base
| (i
<< TARGET_PAGE_BITS
);
1952 if (prot
!= data
->prot
) {
1953 rc
= walk_memory_regions_end(data
, pa
, prot
);
1961 for (i
= 0; i
< L2_SIZE
; ++i
) {
1962 pa
= base
| ((abi_ulong
)i
<<
1963 (TARGET_PAGE_BITS
+ L2_BITS
* level
));
1964 rc
= walk_memory_regions_1(data
, pa
, level
- 1, pp
+ i
);
1974 int walk_memory_regions(void *priv
, walk_memory_regions_fn fn
)
1976 struct walk_memory_regions_data data
;
1984 for (i
= 0; i
< V_L1_SIZE
; i
++) {
1985 int rc
= walk_memory_regions_1(&data
, (abi_ulong
)i
<< V_L1_SHIFT
,
1986 V_L1_SHIFT
/ L2_BITS
- 1, l1_map
+ i
);
1992 return walk_memory_regions_end(&data
, 0, 0);
1995 static int dump_region(void *priv
, abi_ulong start
,
1996 abi_ulong end
, unsigned long prot
)
1998 FILE *f
= (FILE *)priv
;
2000 (void) fprintf(f
, TARGET_ABI_FMT_lx
"-"TARGET_ABI_FMT_lx
2001 " "TARGET_ABI_FMT_lx
" %c%c%c\n",
2002 start
, end
, end
- start
,
2003 ((prot
& PAGE_READ
) ? 'r' : '-'),
2004 ((prot
& PAGE_WRITE
) ? 'w' : '-'),
2005 ((prot
& PAGE_EXEC
) ? 'x' : '-'));
2010 /* dump memory mappings */
2011 void page_dump(FILE *f
)
2013 (void) fprintf(f
, "%-8s %-8s %-8s %s\n",
2014 "start", "end", "size", "prot");
2015 walk_memory_regions(f
, dump_region
);
2018 int page_get_flags(target_ulong address
)
2022 p
= page_find(address
>> TARGET_PAGE_BITS
);
2028 /* Modify the flags of a page and invalidate the code if necessary.
2029 The flag PAGE_WRITE_ORG is positioned automatically depending
2030 on PAGE_WRITE. The mmap_lock should already be held. */
2031 void page_set_flags(target_ulong start
, target_ulong end
, int flags
)
2033 target_ulong addr
, len
;
2035 /* This function should never be called with addresses outside the
2036 guest address space. If this assert fires, it probably indicates
2037 a missing call to h2g_valid. */
2038 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2039 assert(end
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2041 assert(start
< end
);
2043 start
= start
& TARGET_PAGE_MASK
;
2044 end
= TARGET_PAGE_ALIGN(end
);
2046 if (flags
& PAGE_WRITE
) {
2047 flags
|= PAGE_WRITE_ORG
;
2050 for (addr
= start
, len
= end
- start
;
2052 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2053 PageDesc
*p
= page_find_alloc(addr
>> TARGET_PAGE_BITS
, 1);
2055 /* If the write protection bit is set, then we invalidate
2057 if (!(p
->flags
& PAGE_WRITE
) &&
2058 (flags
& PAGE_WRITE
) &&
2060 tb_invalidate_phys_page(addr
, 0, NULL
);
2066 int page_check_range(target_ulong start
, target_ulong len
, int flags
)
2072 /* This function should never be called with addresses outside the
2073 guest address space. If this assert fires, it probably indicates
2074 a missing call to h2g_valid. */
2075 #if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2076 assert(start
< ((abi_ulong
)1 << L1_MAP_ADDR_SPACE_BITS
));
2082 if (start
+ len
- 1 < start
) {
2083 /* We've wrapped around. */
2087 end
= TARGET_PAGE_ALIGN(start
+len
); /* must do before we loose bits in the next step */
2088 start
= start
& TARGET_PAGE_MASK
;
2090 for (addr
= start
, len
= end
- start
;
2092 len
-= TARGET_PAGE_SIZE
, addr
+= TARGET_PAGE_SIZE
) {
2093 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2096 if( !(p
->flags
& PAGE_VALID
) )
2099 if ((flags
& PAGE_READ
) && !(p
->flags
& PAGE_READ
))
2101 if (flags
& PAGE_WRITE
) {
2102 if (!(p
->flags
& PAGE_WRITE_ORG
))
2104 /* unprotect the page if it was put read-only because it
2105 contains translated code */
2106 if (!(p
->flags
& PAGE_WRITE
)) {
2107 if (!page_unprotect(addr
, 0, NULL
))
2116 /* called from signal handler: invalidate the code and unprotect the
2117 page. Return TRUE if the fault was successfully handled. */
2118 int page_unprotect(target_ulong address
, uintptr_t pc
, void *puc
)
2122 target_ulong host_start
, host_end
, addr
;
2124 /* Technically this isn't safe inside a signal handler. However we
2125 know this only ever happens in a synchronous SEGV handler, so in
2126 practice it seems to be ok. */
2129 p
= page_find(address
>> TARGET_PAGE_BITS
);
2135 /* if the page was really writable, then we change its
2136 protection back to writable */
2137 if ((p
->flags
& PAGE_WRITE_ORG
) && !(p
->flags
& PAGE_WRITE
)) {
2138 host_start
= address
& qemu_host_page_mask
;
2139 host_end
= host_start
+ qemu_host_page_size
;
2142 for (addr
= host_start
; addr
< host_end
; addr
+= TARGET_PAGE_SIZE
) {
2143 p
= page_find(addr
>> TARGET_PAGE_BITS
);
2144 p
->flags
|= PAGE_WRITE
;
2147 /* and since the content will be modified, we must invalidate
2148 the corresponding translated code. */
2149 tb_invalidate_phys_page(addr
, pc
, puc
);
2150 #ifdef DEBUG_TB_CHECK
2151 tb_invalidate_check(addr
);
2154 mprotect((void *)g2h(host_start
), qemu_host_page_size
,
2163 #endif /* defined(CONFIG_USER_ONLY) */
2165 #if !defined(CONFIG_USER_ONLY)
2167 #define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2168 typedef struct subpage_t
{
2170 target_phys_addr_t base
;
2171 uint16_t sub_section
[TARGET_PAGE_SIZE
];
2174 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
2176 static subpage_t
*subpage_init(target_phys_addr_t base
);
2177 static void destroy_page_desc(uint16_t section_index
)
2179 MemoryRegionSection
*section
= &phys_sections
[section_index
];
2180 MemoryRegion
*mr
= section
->mr
;
2183 subpage_t
*subpage
= container_of(mr
, subpage_t
, iomem
);
2184 memory_region_destroy(&subpage
->iomem
);
2189 static void destroy_l2_mapping(PhysPageEntry
*lp
, unsigned level
)
2194 if (lp
->ptr
== PHYS_MAP_NODE_NIL
) {
2198 p
= phys_map_nodes
[lp
->ptr
];
2199 for (i
= 0; i
< L2_SIZE
; ++i
) {
2200 if (!p
[i
].is_leaf
) {
2201 destroy_l2_mapping(&p
[i
], level
- 1);
2203 destroy_page_desc(p
[i
].ptr
);
2207 lp
->ptr
= PHYS_MAP_NODE_NIL
;
2210 static void destroy_all_mappings(void)
2212 destroy_l2_mapping(&phys_map
, P_L2_LEVELS
- 1);
2213 phys_map_nodes_reset();
2216 static uint16_t phys_section_add(MemoryRegionSection
*section
)
2218 if (phys_sections_nb
== phys_sections_nb_alloc
) {
2219 phys_sections_nb_alloc
= MAX(phys_sections_nb_alloc
* 2, 16);
2220 phys_sections
= g_renew(MemoryRegionSection
, phys_sections
,
2221 phys_sections_nb_alloc
);
2223 phys_sections
[phys_sections_nb
] = *section
;
2224 return phys_sections_nb
++;
2227 static void phys_sections_clear(void)
2229 phys_sections_nb
= 0;
2232 static void register_subpage(MemoryRegionSection
*section
)
2235 target_phys_addr_t base
= section
->offset_within_address_space
2237 MemoryRegionSection
*existing
= phys_page_find(base
>> TARGET_PAGE_BITS
);
2238 MemoryRegionSection subsection
= {
2239 .offset_within_address_space
= base
,
2240 .size
= TARGET_PAGE_SIZE
,
2242 target_phys_addr_t start
, end
;
2244 assert(existing
->mr
->subpage
|| existing
->mr
== &io_mem_unassigned
);
2246 if (!(existing
->mr
->subpage
)) {
2247 subpage
= subpage_init(base
);
2248 subsection
.mr
= &subpage
->iomem
;
2249 phys_page_set(base
>> TARGET_PAGE_BITS
, 1,
2250 phys_section_add(&subsection
));
2252 subpage
= container_of(existing
->mr
, subpage_t
, iomem
);
2254 start
= section
->offset_within_address_space
& ~TARGET_PAGE_MASK
;
2255 end
= start
+ section
->size
- 1;
2256 subpage_register(subpage
, start
, end
, phys_section_add(section
));
2260 static void register_multipage(MemoryRegionSection
*section
)
2262 target_phys_addr_t start_addr
= section
->offset_within_address_space
;
2263 ram_addr_t size
= section
->size
;
2264 target_phys_addr_t addr
;
2265 uint16_t section_index
= phys_section_add(section
);
2270 phys_page_set(addr
>> TARGET_PAGE_BITS
, size
>> TARGET_PAGE_BITS
,
2274 void cpu_register_physical_memory_log(MemoryRegionSection
*section
,
2277 MemoryRegionSection now
= *section
, remain
= *section
;
2279 if ((now
.offset_within_address_space
& ~TARGET_PAGE_MASK
)
2280 || (now
.size
< TARGET_PAGE_SIZE
)) {
2281 now
.size
= MIN(TARGET_PAGE_ALIGN(now
.offset_within_address_space
)
2282 - now
.offset_within_address_space
,
2284 register_subpage(&now
);
2285 remain
.size
-= now
.size
;
2286 remain
.offset_within_address_space
+= now
.size
;
2287 remain
.offset_within_region
+= now
.size
;
2289 while (remain
.size
>= TARGET_PAGE_SIZE
) {
2291 if (remain
.offset_within_region
& ~TARGET_PAGE_MASK
) {
2292 now
.size
= TARGET_PAGE_SIZE
;
2293 register_subpage(&now
);
2295 now
.size
&= TARGET_PAGE_MASK
;
2296 register_multipage(&now
);
2298 remain
.size
-= now
.size
;
2299 remain
.offset_within_address_space
+= now
.size
;
2300 remain
.offset_within_region
+= now
.size
;
2304 register_subpage(&now
);
/* Flush KVM's coalesced MMIO ring, if KVM is in use. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
2314 #if defined(__linux__) && !defined(TARGET_S390X)
2316 #include <sys/vfs.h>
2318 #define HUGETLBFS_MAGIC 0x958458f6
2320 static long gethugepagesize(const char *path
)
2326 ret
= statfs(path
, &fs
);
2327 } while (ret
!= 0 && errno
== EINTR
);
2334 if (fs
.f_type
!= HUGETLBFS_MAGIC
)
2335 fprintf(stderr
, "Warning: path not on HugeTLBFS: %s\n", path
);
2340 static void *file_ram_alloc(RAMBlock
*block
,
2350 unsigned long hpagesize
;
2352 hpagesize
= gethugepagesize(path
);
2357 if (memory
< hpagesize
) {
2361 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2362 fprintf(stderr
, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2366 if (asprintf(&filename
, "%s/qemu_back_mem.XXXXXX", path
) == -1) {
2370 fd
= mkstemp(filename
);
2372 perror("unable to create backing store for hugepages");
2379 memory
= (memory
+hpagesize
-1) & ~(hpagesize
-1);
2382 * ftruncate is not supported by hugetlbfs in older
2383 * hosts, so don't bother bailing out on errors.
2384 * If anything goes wrong with it under other filesystems,
2387 if (ftruncate(fd
, memory
))
2388 perror("ftruncate");
2391 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2392 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2393 * to sidestep this quirk.
2395 flags
= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
: MAP_PRIVATE
;
2396 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, flags
, fd
, 0);
2398 area
= mmap(0, memory
, PROT_READ
| PROT_WRITE
, MAP_PRIVATE
, fd
, 0);
2400 if (area
== MAP_FAILED
) {
2401 perror("file_ram_alloc: can't mmap RAM pages");
2410 static ram_addr_t
find_ram_offset(ram_addr_t size
)
2412 RAMBlock
*block
, *next_block
;
2413 ram_addr_t offset
= RAM_ADDR_MAX
, mingap
= RAM_ADDR_MAX
;
2415 if (QLIST_EMPTY(&ram_list
.blocks
))
2418 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2419 ram_addr_t end
, next
= RAM_ADDR_MAX
;
2421 end
= block
->offset
+ block
->length
;
2423 QLIST_FOREACH(next_block
, &ram_list
.blocks
, next
) {
2424 if (next_block
->offset
>= end
) {
2425 next
= MIN(next
, next_block
->offset
);
2428 if (next
- end
>= size
&& next
- end
< mingap
) {
2430 mingap
= next
- end
;
2434 if (offset
== RAM_ADDR_MAX
) {
2435 fprintf(stderr
, "Failed to find gap of requested size: %" PRIu64
"\n",
2443 static ram_addr_t
last_ram_offset(void)
2446 ram_addr_t last
= 0;
2448 QLIST_FOREACH(block
, &ram_list
.blocks
, next
)
2449 last
= MAX(last
, block
->offset
+ block
->length
);
2454 static void qemu_ram_setup_dump(void *addr
, ram_addr_t size
)
2457 QemuOpts
*machine_opts
;
2459 /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
2460 machine_opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
2462 !qemu_opt_get_bool(machine_opts
, "dump-guest-core", true)) {
2463 ret
= qemu_madvise(addr
, size
, QEMU_MADV_DONTDUMP
);
2465 perror("qemu_madvise");
2466 fprintf(stderr
, "madvise doesn't support MADV_DONTDUMP, "
2467 "but dump_guest_core=off specified\n");
2472 void qemu_ram_set_idstr(ram_addr_t addr
, const char *name
, DeviceState
*dev
)
2474 RAMBlock
*new_block
, *block
;
2477 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2478 if (block
->offset
== addr
) {
2484 assert(!new_block
->idstr
[0]);
2487 char *id
= qdev_get_dev_path(dev
);
2489 snprintf(new_block
->idstr
, sizeof(new_block
->idstr
), "%s/", id
);
2493 pstrcat(new_block
->idstr
, sizeof(new_block
->idstr
), name
);
2495 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2496 if (block
!= new_block
&& !strcmp(block
->idstr
, new_block
->idstr
)) {
2497 fprintf(stderr
, "RAMBlock \"%s\" already registered, abort!\n",
2504 static int memory_try_enable_merging(void *addr
, size_t len
)
2508 opts
= qemu_opts_find(qemu_find_opts("machine"), 0);
2509 if (opts
&& !qemu_opt_get_bool(opts
, "mem-merge", true)) {
2510 /* disabled by the user */
2514 return qemu_madvise(addr
, len
, QEMU_MADV_MERGEABLE
);
2517 ram_addr_t
qemu_ram_alloc_from_ptr(ram_addr_t size
, void *host
,
2520 RAMBlock
*new_block
;
2522 size
= TARGET_PAGE_ALIGN(size
);
2523 new_block
= g_malloc0(sizeof(*new_block
));
2526 new_block
->offset
= find_ram_offset(size
);
2528 new_block
->host
= host
;
2529 new_block
->flags
|= RAM_PREALLOC_MASK
;
2532 #if defined (__linux__) && !defined(TARGET_S390X)
2533 new_block
->host
= file_ram_alloc(new_block
, size
, mem_path
);
2534 if (!new_block
->host
) {
2535 new_block
->host
= qemu_vmalloc(size
);
2536 memory_try_enable_merging(new_block
->host
, size
);
2539 fprintf(stderr
, "-mem-path option unsupported\n");
2543 if (xen_enabled()) {
2544 xen_ram_alloc(new_block
->offset
, size
, mr
);
2545 } else if (kvm_enabled()) {
2546 /* some s390/kvm configurations have special constraints */
2547 new_block
->host
= kvm_vmalloc(size
);
2549 new_block
->host
= qemu_vmalloc(size
);
2551 memory_try_enable_merging(new_block
->host
, size
);
2554 new_block
->length
= size
;
2556 QLIST_INSERT_HEAD(&ram_list
.blocks
, new_block
, next
);
2558 ram_list
.phys_dirty
= g_realloc(ram_list
.phys_dirty
,
2559 last_ram_offset() >> TARGET_PAGE_BITS
);
2560 memset(ram_list
.phys_dirty
+ (new_block
->offset
>> TARGET_PAGE_BITS
),
2561 0, size
>> TARGET_PAGE_BITS
);
2562 cpu_physical_memory_set_dirty_range(new_block
->offset
, size
, 0xff);
2564 qemu_ram_setup_dump(new_block
->host
, size
);
2567 kvm_setup_guest_memory(new_block
->host
, size
);
2569 return new_block
->offset
;
2572 ram_addr_t
qemu_ram_alloc(ram_addr_t size
, MemoryRegion
*mr
)
2574 return qemu_ram_alloc_from_ptr(size
, NULL
, mr
);
2577 void qemu_ram_free_from_ptr(ram_addr_t addr
)
2581 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2582 if (addr
== block
->offset
) {
2583 QLIST_REMOVE(block
, next
);
2590 void qemu_ram_free(ram_addr_t addr
)
2594 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2595 if (addr
== block
->offset
) {
2596 QLIST_REMOVE(block
, next
);
2597 if (block
->flags
& RAM_PREALLOC_MASK
) {
2599 } else if (mem_path
) {
2600 #if defined (__linux__) && !defined(TARGET_S390X)
2602 munmap(block
->host
, block
->length
);
2605 qemu_vfree(block
->host
);
2611 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2612 munmap(block
->host
, block
->length
);
2614 if (xen_enabled()) {
2615 xen_invalidate_map_cache_entry(block
->host
);
2617 qemu_vfree(block
->host
);
2629 void qemu_ram_remap(ram_addr_t addr
, ram_addr_t length
)
2636 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2637 offset
= addr
- block
->offset
;
2638 if (offset
< block
->length
) {
2639 vaddr
= block
->host
+ offset
;
2640 if (block
->flags
& RAM_PREALLOC_MASK
) {
2644 munmap(vaddr
, length
);
2646 #if defined(__linux__) && !defined(TARGET_S390X)
2649 flags
|= mem_prealloc
? MAP_POPULATE
| MAP_SHARED
:
2652 flags
|= MAP_PRIVATE
;
2654 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2655 flags
, block
->fd
, offset
);
2657 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2658 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2665 #if defined(TARGET_S390X) && defined(CONFIG_KVM)
2666 flags
|= MAP_SHARED
| MAP_ANONYMOUS
;
2667 area
= mmap(vaddr
, length
, PROT_EXEC
|PROT_READ
|PROT_WRITE
,
2670 flags
|= MAP_PRIVATE
| MAP_ANONYMOUS
;
2671 area
= mmap(vaddr
, length
, PROT_READ
| PROT_WRITE
,
2675 if (area
!= vaddr
) {
2676 fprintf(stderr
, "Could not remap addr: "
2677 RAM_ADDR_FMT
"@" RAM_ADDR_FMT
"\n",
2681 memory_try_enable_merging(vaddr
, length
);
2682 qemu_ram_setup_dump(vaddr
, length
);
2688 #endif /* !_WIN32 */
2690 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2691 With the exception of the softmmu code in this file, this should
2692 only be used for local memory (e.g. video ram) that the device owns,
2693 and knows it isn't going to access beyond the end of the block.
2695 It should not be used for general purpose DMA.
2696 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2698 void *qemu_get_ram_ptr(ram_addr_t addr
)
2702 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2703 if (addr
- block
->offset
< block
->length
) {
2704 /* Move this entry to to start of the list. */
2705 if (block
!= QLIST_FIRST(&ram_list
.blocks
)) {
2706 QLIST_REMOVE(block
, next
);
2707 QLIST_INSERT_HEAD(&ram_list
.blocks
, block
, next
);
2709 if (xen_enabled()) {
2710 /* We need to check if the requested address is in the RAM
2711 * because we don't want to map the entire memory in QEMU.
2712 * In that case just map until the end of the page.
2714 if (block
->offset
== 0) {
2715 return xen_map_cache(addr
, 0, 0);
2716 } else if (block
->host
== NULL
) {
2718 xen_map_cache(block
->offset
, block
->length
, 1);
2721 return block
->host
+ (addr
- block
->offset
);
2725 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2731 /* Return a host pointer to ram allocated with qemu_ram_alloc.
2732 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2734 void *qemu_safe_ram_ptr(ram_addr_t addr
)
2738 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2739 if (addr
- block
->offset
< block
->length
) {
2740 if (xen_enabled()) {
2741 /* We need to check if the requested address is in the RAM
2742 * because we don't want to map the entire memory in QEMU.
2743 * In that case just map until the end of the page.
2745 if (block
->offset
== 0) {
2746 return xen_map_cache(addr
, 0, 0);
2747 } else if (block
->host
== NULL
) {
2749 xen_map_cache(block
->offset
, block
->length
, 1);
2752 return block
->host
+ (addr
- block
->offset
);
2756 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
2762 /* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2763 * but takes a size argument */
2764 void *qemu_ram_ptr_length(ram_addr_t addr
, ram_addr_t
*size
)
2769 if (xen_enabled()) {
2770 return xen_map_cache(addr
, *size
, 1);
2774 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2775 if (addr
- block
->offset
< block
->length
) {
2776 if (addr
- block
->offset
+ *size
> block
->length
)
2777 *size
= block
->length
- addr
+ block
->offset
;
2778 return block
->host
+ (addr
- block
->offset
);
2782 fprintf(stderr
, "Bad ram offset %" PRIx64
"\n", (uint64_t)addr
);
/* Release a pointer obtained from qemu_get_ram_ptr(); currently only
 * emits a trace event. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
2792 int qemu_ram_addr_from_host(void *ptr
, ram_addr_t
*ram_addr
)
2795 uint8_t *host
= ptr
;
2797 if (xen_enabled()) {
2798 *ram_addr
= xen_ram_addr_from_mapcache(ptr
);
2802 QLIST_FOREACH(block
, &ram_list
.blocks
, next
) {
2803 /* This case append when the block is not mapped. */
2804 if (block
->host
== NULL
) {
2807 if (host
- block
->host
< block
->length
) {
2808 *ram_addr
= block
->offset
+ (host
- block
->host
);
2816 /* Some of the softmmu routines need to translate from a host pointer
2817 (typically a TLB entry) back to a ram offset. */
2818 ram_addr_t
qemu_ram_addr_from_host_nofail(void *ptr
)
2820 ram_addr_t ram_addr
;
2822 if (qemu_ram_addr_from_host(ptr
, &ram_addr
)) {
2823 fprintf(stderr
, "Bad ram pointer %p\n", ptr
);
2829 static uint64_t unassigned_mem_read(void *opaque
, target_phys_addr_t addr
,
2832 #ifdef DEBUG_UNASSIGNED
2833 printf("Unassigned mem read " TARGET_FMT_plx
"\n", addr
);
2835 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2836 cpu_unassigned_access(cpu_single_env
, addr
, 0, 0, 0, size
);
2841 static void unassigned_mem_write(void *opaque
, target_phys_addr_t addr
,
2842 uint64_t val
, unsigned size
)
2844 #ifdef DEBUG_UNASSIGNED
2845 printf("Unassigned mem write " TARGET_FMT_plx
" = 0x%"PRIx64
"\n", addr
, val
);
2847 #if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2848 cpu_unassigned_access(cpu_single_env
, addr
, 1, 0, 0, size
);
2852 static const MemoryRegionOps unassigned_mem_ops
= {
2853 .read
= unassigned_mem_read
,
2854 .write
= unassigned_mem_write
,
2855 .endianness
= DEVICE_NATIVE_ENDIAN
,
2858 static uint64_t error_mem_read(void *opaque
, target_phys_addr_t addr
,
2864 static void error_mem_write(void *opaque
, target_phys_addr_t addr
,
2865 uint64_t value
, unsigned size
)
2870 static const MemoryRegionOps error_mem_ops
= {
2871 .read
= error_mem_read
,
2872 .write
= error_mem_write
,
2873 .endianness
= DEVICE_NATIVE_ENDIAN
,
2876 static const MemoryRegionOps rom_mem_ops
= {
2877 .read
= error_mem_read
,
2878 .write
= unassigned_mem_write
,
2879 .endianness
= DEVICE_NATIVE_ENDIAN
,
2882 static void notdirty_mem_write(void *opaque
, target_phys_addr_t ram_addr
,
2883 uint64_t val
, unsigned size
)
2886 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2887 if (!(dirty_flags
& CODE_DIRTY_FLAG
)) {
2888 #if !defined(CONFIG_USER_ONLY)
2889 tb_invalidate_phys_page_fast(ram_addr
, size
);
2890 dirty_flags
= cpu_physical_memory_get_dirty_flags(ram_addr
);
2895 stb_p(qemu_get_ram_ptr(ram_addr
), val
);
2898 stw_p(qemu_get_ram_ptr(ram_addr
), val
);
2901 stl_p(qemu_get_ram_ptr(ram_addr
), val
);
2906 dirty_flags
|= (0xff & ~CODE_DIRTY_FLAG
);
2907 cpu_physical_memory_set_dirty_flags(ram_addr
, dirty_flags
);
2908 /* we remove the notdirty callback only if the code has been
2910 if (dirty_flags
== 0xff)
2911 tlb_set_dirty(cpu_single_env
, cpu_single_env
->mem_io_vaddr
);
2914 static const MemoryRegionOps notdirty_mem_ops
= {
2915 .read
= error_mem_read
,
2916 .write
= notdirty_mem_write
,
2917 .endianness
= DEVICE_NATIVE_ENDIAN
,
2920 /* Generate a debug exception if a watchpoint has been hit. */
2921 static void check_watchpoint(int offset
, int len_mask
, int flags
)
2923 CPUArchState
*env
= cpu_single_env
;
2924 target_ulong pc
, cs_base
;
2925 TranslationBlock
*tb
;
2930 if (env
->watchpoint_hit
) {
2931 /* We re-entered the check after replacing the TB. Now raise
2932 * the debug interrupt so that is will trigger after the
2933 * current instruction. */
2934 cpu_interrupt(env
, CPU_INTERRUPT_DEBUG
);
2937 vaddr
= (env
->mem_io_vaddr
& TARGET_PAGE_MASK
) + offset
;
2938 QTAILQ_FOREACH(wp
, &env
->watchpoints
, entry
) {
2939 if ((vaddr
== (wp
->vaddr
& len_mask
) ||
2940 (vaddr
& wp
->len_mask
) == wp
->vaddr
) && (wp
->flags
& flags
)) {
2941 wp
->flags
|= BP_WATCHPOINT_HIT
;
2942 if (!env
->watchpoint_hit
) {
2943 env
->watchpoint_hit
= wp
;
2944 tb
= tb_find_pc(env
->mem_io_pc
);
2946 cpu_abort(env
, "check_watchpoint: could not find TB for "
2947 "pc=%p", (void *)env
->mem_io_pc
);
2949 cpu_restore_state(tb
, env
, env
->mem_io_pc
);
2950 tb_phys_invalidate(tb
, -1);
2951 if (wp
->flags
& BP_STOP_BEFORE_ACCESS
) {
2952 env
->exception_index
= EXCP_DEBUG
;
2955 cpu_get_tb_cpu_state(env
, &pc
, &cs_base
, &cpu_flags
);
2956 tb_gen_code(env
, pc
, cs_base
, cpu_flags
, 1);
2957 cpu_resume_from_signal(env
, NULL
);
2961 wp
->flags
&= ~BP_WATCHPOINT_HIT
;
2966 /* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2967 so these check for a hit then pass through to the normal out-of-line
2969 static uint64_t watch_mem_read(void *opaque
, target_phys_addr_t addr
,
2972 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_READ
);
2974 case 1: return ldub_phys(addr
);
2975 case 2: return lduw_phys(addr
);
2976 case 4: return ldl_phys(addr
);
2981 static void watch_mem_write(void *opaque
, target_phys_addr_t addr
,
2982 uint64_t val
, unsigned size
)
2984 check_watchpoint(addr
& ~TARGET_PAGE_MASK
, ~(size
- 1), BP_MEM_WRITE
);
2987 stb_phys(addr
, val
);
2990 stw_phys(addr
, val
);
2993 stl_phys(addr
, val
);
2999 static const MemoryRegionOps watch_mem_ops
= {
3000 .read
= watch_mem_read
,
3001 .write
= watch_mem_write
,
3002 .endianness
= DEVICE_NATIVE_ENDIAN
,
3005 static uint64_t subpage_read(void *opaque
, target_phys_addr_t addr
,
3008 subpage_t
*mmio
= opaque
;
3009 unsigned int idx
= SUBPAGE_IDX(addr
);
3010 MemoryRegionSection
*section
;
3011 #if defined(DEBUG_SUBPAGE)
3012 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
" idx %d\n", __func__
,
3013 mmio
, len
, addr
, idx
);
3016 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3018 addr
-= section
->offset_within_address_space
;
3019 addr
+= section
->offset_within_region
;
3020 return io_mem_read(section
->mr
, addr
, len
);
3023 static void subpage_write(void *opaque
, target_phys_addr_t addr
,
3024 uint64_t value
, unsigned len
)
3026 subpage_t
*mmio
= opaque
;
3027 unsigned int idx
= SUBPAGE_IDX(addr
);
3028 MemoryRegionSection
*section
;
3029 #if defined(DEBUG_SUBPAGE)
3030 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3031 " idx %d value %"PRIx64
"\n",
3032 __func__
, mmio
, len
, addr
, idx
, value
);
3035 section
= &phys_sections
[mmio
->sub_section
[idx
]];
3037 addr
-= section
->offset_within_address_space
;
3038 addr
+= section
->offset_within_region
;
3039 io_mem_write(section
->mr
, addr
, value
, len
);
3042 static const MemoryRegionOps subpage_ops
= {
3043 .read
= subpage_read
,
3044 .write
= subpage_write
,
3045 .endianness
= DEVICE_NATIVE_ENDIAN
,
3048 static uint64_t subpage_ram_read(void *opaque
, target_phys_addr_t addr
,
3051 ram_addr_t raddr
= addr
;
3052 void *ptr
= qemu_get_ram_ptr(raddr
);
3054 case 1: return ldub_p(ptr
);
3055 case 2: return lduw_p(ptr
);
3056 case 4: return ldl_p(ptr
);
3061 static void subpage_ram_write(void *opaque
, target_phys_addr_t addr
,
3062 uint64_t value
, unsigned size
)
3064 ram_addr_t raddr
= addr
;
3065 void *ptr
= qemu_get_ram_ptr(raddr
);
3067 case 1: return stb_p(ptr
, value
);
3068 case 2: return stw_p(ptr
, value
);
3069 case 4: return stl_p(ptr
, value
);
3074 static const MemoryRegionOps subpage_ram_ops
= {
3075 .read
= subpage_ram_read
,
3076 .write
= subpage_ram_write
,
3077 .endianness
= DEVICE_NATIVE_ENDIAN
,
3080 static int subpage_register (subpage_t
*mmio
, uint32_t start
, uint32_t end
,
3085 if (start
>= TARGET_PAGE_SIZE
|| end
>= TARGET_PAGE_SIZE
)
3087 idx
= SUBPAGE_IDX(start
);
3088 eidx
= SUBPAGE_IDX(end
);
3089 #if defined(DEBUG_SUBPAGE)
3090 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__
,
3091 mmio
, start
, end
, idx
, eidx
, memory
);
3093 if (memory_region_is_ram(phys_sections
[section
].mr
)) {
3094 MemoryRegionSection new_section
= phys_sections
[section
];
3095 new_section
.mr
= &io_mem_subpage_ram
;
3096 section
= phys_section_add(&new_section
);
3098 for (; idx
<= eidx
; idx
++) {
3099 mmio
->sub_section
[idx
] = section
;
3105 static subpage_t
*subpage_init(target_phys_addr_t base
)
3109 mmio
= g_malloc0(sizeof(subpage_t
));
3112 memory_region_init_io(&mmio
->iomem
, &subpage_ops
, mmio
,
3113 "subpage", TARGET_PAGE_SIZE
);
3114 mmio
->iomem
.subpage
= true;
3115 #if defined(DEBUG_SUBPAGE)
3116 printf("%s: %p base " TARGET_FMT_plx
" len %08x %d\n", __func__
,
3117 mmio
, base
, TARGET_PAGE_SIZE
, subpage_memory
);
3119 subpage_register(mmio
, 0, TARGET_PAGE_SIZE
-1, phys_section_unassigned
);
3124 static uint16_t dummy_section(MemoryRegion
*mr
)
3126 MemoryRegionSection section
= {
3128 .offset_within_address_space
= 0,
3129 .offset_within_region
= 0,
3133 return phys_section_add(§ion
);
3136 MemoryRegion
*iotlb_to_region(target_phys_addr_t index
)
3138 return phys_sections
[index
& ~TARGET_PAGE_MASK
].mr
;
3141 static void io_mem_init(void)
3143 memory_region_init_io(&io_mem_ram
, &error_mem_ops
, NULL
, "ram", UINT64_MAX
);
3144 memory_region_init_io(&io_mem_rom
, &rom_mem_ops
, NULL
, "rom", UINT64_MAX
);
3145 memory_region_init_io(&io_mem_unassigned
, &unassigned_mem_ops
, NULL
,
3146 "unassigned", UINT64_MAX
);
3147 memory_region_init_io(&io_mem_notdirty
, ¬dirty_mem_ops
, NULL
,
3148 "notdirty", UINT64_MAX
);
3149 memory_region_init_io(&io_mem_subpage_ram
, &subpage_ram_ops
, NULL
,
3150 "subpage-ram", UINT64_MAX
);
3151 memory_region_init_io(&io_mem_watch
, &watch_mem_ops
, NULL
,
3152 "watch", UINT64_MAX
);
3155 static void core_begin(MemoryListener
*listener
)
3157 destroy_all_mappings();
3158 phys_sections_clear();
3159 phys_map
.ptr
= PHYS_MAP_NODE_NIL
;
3160 phys_section_unassigned
= dummy_section(&io_mem_unassigned
);
3161 phys_section_notdirty
= dummy_section(&io_mem_notdirty
);
3162 phys_section_rom
= dummy_section(&io_mem_rom
);
3163 phys_section_watch
= dummy_section(&io_mem_watch
);
3166 static void core_commit(MemoryListener
*listener
)
3170 /* since each CPU stores ram addresses in its TLB cache, we must
3171 reset the modified entries */
3173 for(env
= first_cpu
; env
!= NULL
; env
= env
->next_cpu
) {
3178 static void core_region_add(MemoryListener
*listener
,
3179 MemoryRegionSection
*section
)
3181 cpu_register_physical_memory_log(section
, section
->readonly
);
3184 static void core_region_nop(MemoryListener
*listener
,
3185 MemoryRegionSection
*section
)
3187 cpu_register_physical_memory_log(section
, section
->readonly
);
3190 static void core_log_global_start(MemoryListener
*listener
)
3192 cpu_physical_memory_set_dirty_tracking(1);
3195 static void core_log_global_stop(MemoryListener
*listener
)
3197 cpu_physical_memory_set_dirty_tracking(0);
3200 static void io_region_add(MemoryListener
*listener
,
3201 MemoryRegionSection
*section
)
3203 MemoryRegionIORange
*mrio
= g_new(MemoryRegionIORange
, 1);
3205 mrio
->mr
= section
->mr
;
3206 mrio
->offset
= section
->offset_within_region
;
3207 iorange_init(&mrio
->iorange
, &memory_region_iorange_ops
,
3208 section
->offset_within_address_space
, section
->size
);
3209 ioport_register(&mrio
->iorange
);
3212 static void io_region_del(MemoryListener
*listener
,
3213 MemoryRegionSection
*section
)
3215 isa_unassign_ioport(section
->offset_within_address_space
, section
->size
);
3218 static MemoryListener core_memory_listener
= {
3219 .begin
= core_begin
,
3220 .commit
= core_commit
,
3221 .region_add
= core_region_add
,
3222 .region_nop
= core_region_nop
,
3223 .log_global_start
= core_log_global_start
,
3224 .log_global_stop
= core_log_global_stop
,
3228 static MemoryListener io_memory_listener
= {
3229 .region_add
= io_region_add
,
3230 .region_del
= io_region_del
,
3234 static void memory_map_init(void)
3236 system_memory
= g_malloc(sizeof(*system_memory
));
3237 memory_region_init(system_memory
, "system", INT64_MAX
);
3238 set_system_memory_map(system_memory
);
3240 system_io
= g_malloc(sizeof(*system_io
));
3241 memory_region_init(system_io
, "io", 65536);
3242 set_system_io_map(system_io
);
3244 memory_listener_register(&core_memory_listener
, system_memory
);
3245 memory_listener_register(&io_memory_listener
, system_io
);
3248 MemoryRegion
*get_system_memory(void)
3250 return system_memory
;
3253 MemoryRegion
*get_system_io(void)
3258 #endif /* !defined(CONFIG_USER_ONLY) */
3260 /* physical memory access (slow version, mainly for debug) */
3261 #if defined(CONFIG_USER_ONLY)
3262 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
3263 uint8_t *buf
, int len
, int is_write
)
3270 page
= addr
& TARGET_PAGE_MASK
;
3271 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3274 flags
= page_get_flags(page
);
3275 if (!(flags
& PAGE_VALID
))
3278 if (!(flags
& PAGE_WRITE
))
3280 /* XXX: this code should not depend on lock_user */
3281 if (!(p
= lock_user(VERIFY_WRITE
, addr
, l
, 0)))
3284 unlock_user(p
, addr
, l
);
3286 if (!(flags
& PAGE_READ
))
3288 /* XXX: this code should not depend on lock_user */
3289 if (!(p
= lock_user(VERIFY_READ
, addr
, l
, 1)))
3292 unlock_user(p
, addr
, 0);
3303 static void invalidate_and_set_dirty(target_phys_addr_t addr
,
3304 target_phys_addr_t length
)
3306 if (!cpu_physical_memory_is_dirty(addr
)) {
3307 /* invalidate code */
3308 tb_invalidate_phys_page_range(addr
, addr
+ length
, 0);
3310 cpu_physical_memory_set_dirty_flags(addr
, (0xff & ~CODE_DIRTY_FLAG
));
3312 xen_modified_memory(addr
, length
);
3315 void cpu_physical_memory_rw(target_phys_addr_t addr
, uint8_t *buf
,
3316 int len
, int is_write
)
3321 target_phys_addr_t page
;
3322 MemoryRegionSection
*section
;
3325 page
= addr
& TARGET_PAGE_MASK
;
3326 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3329 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3332 if (!memory_region_is_ram(section
->mr
)) {
3333 target_phys_addr_t addr1
;
3334 addr1
= memory_region_section_addr(section
, addr
);
3335 /* XXX: could force cpu_single_env to NULL to avoid
3337 if (l
>= 4 && ((addr1
& 3) == 0)) {
3338 /* 32 bit write access */
3340 io_mem_write(section
->mr
, addr1
, val
, 4);
3342 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3343 /* 16 bit write access */
3345 io_mem_write(section
->mr
, addr1
, val
, 2);
3348 /* 8 bit write access */
3350 io_mem_write(section
->mr
, addr1
, val
, 1);
3353 } else if (!section
->readonly
) {
3355 addr1
= memory_region_get_ram_addr(section
->mr
)
3356 + memory_region_section_addr(section
, addr
);
3358 ptr
= qemu_get_ram_ptr(addr1
);
3359 memcpy(ptr
, buf
, l
);
3360 invalidate_and_set_dirty(addr1
, l
);
3361 qemu_put_ram_ptr(ptr
);
3364 if (!(memory_region_is_ram(section
->mr
) ||
3365 memory_region_is_romd(section
->mr
))) {
3366 target_phys_addr_t addr1
;
3368 addr1
= memory_region_section_addr(section
, addr
);
3369 if (l
>= 4 && ((addr1
& 3) == 0)) {
3370 /* 32 bit read access */
3371 val
= io_mem_read(section
->mr
, addr1
, 4);
3374 } else if (l
>= 2 && ((addr1
& 1) == 0)) {
3375 /* 16 bit read access */
3376 val
= io_mem_read(section
->mr
, addr1
, 2);
3380 /* 8 bit read access */
3381 val
= io_mem_read(section
->mr
, addr1
, 1);
3387 ptr
= qemu_get_ram_ptr(section
->mr
->ram_addr
3388 + memory_region_section_addr(section
,
3390 memcpy(buf
, ptr
, l
);
3391 qemu_put_ram_ptr(ptr
);
3400 /* used for ROM loading : can write in RAM and ROM */
3401 void cpu_physical_memory_write_rom(target_phys_addr_t addr
,
3402 const uint8_t *buf
, int len
)
3406 target_phys_addr_t page
;
3407 MemoryRegionSection
*section
;
3410 page
= addr
& TARGET_PAGE_MASK
;
3411 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3414 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3416 if (!(memory_region_is_ram(section
->mr
) ||
3417 memory_region_is_romd(section
->mr
))) {
3420 unsigned long addr1
;
3421 addr1
= memory_region_get_ram_addr(section
->mr
)
3422 + memory_region_section_addr(section
, addr
);
3424 ptr
= qemu_get_ram_ptr(addr1
);
3425 memcpy(ptr
, buf
, l
);
3426 invalidate_and_set_dirty(addr1
, l
);
3427 qemu_put_ram_ptr(ptr
);
3437 target_phys_addr_t addr
;
3438 target_phys_addr_t len
;
3441 static BounceBuffer bounce
;
3443 typedef struct MapClient
{
3445 void (*callback
)(void *opaque
);
3446 QLIST_ENTRY(MapClient
) link
;
3449 static QLIST_HEAD(map_client_list
, MapClient
) map_client_list
3450 = QLIST_HEAD_INITIALIZER(map_client_list
);
3452 void *cpu_register_map_client(void *opaque
, void (*callback
)(void *opaque
))
3454 MapClient
*client
= g_malloc(sizeof(*client
));
3456 client
->opaque
= opaque
;
3457 client
->callback
= callback
;
3458 QLIST_INSERT_HEAD(&map_client_list
, client
, link
);
3462 void cpu_unregister_map_client(void *_client
)
3464 MapClient
*client
= (MapClient
*)_client
;
3466 QLIST_REMOVE(client
, link
);
3470 static void cpu_notify_map_clients(void)
3474 while (!QLIST_EMPTY(&map_client_list
)) {
3475 client
= QLIST_FIRST(&map_client_list
);
3476 client
->callback(client
->opaque
);
3477 cpu_unregister_map_client(client
);
3481 /* Map a physical memory region into a host virtual address.
3482 * May map a subset of the requested range, given by and returned in *plen.
3483 * May return NULL if resources needed to perform the mapping are exhausted.
3484 * Use only for reads OR writes - not for read-modify-write operations.
3485 * Use cpu_register_map_client() to know when retrying the map operation is
3486 * likely to succeed.
3488 void *cpu_physical_memory_map(target_phys_addr_t addr
,
3489 target_phys_addr_t
*plen
,
3492 target_phys_addr_t len
= *plen
;
3493 target_phys_addr_t todo
= 0;
3495 target_phys_addr_t page
;
3496 MemoryRegionSection
*section
;
3497 ram_addr_t raddr
= RAM_ADDR_MAX
;
3502 page
= addr
& TARGET_PAGE_MASK
;
3503 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3506 section
= phys_page_find(page
>> TARGET_PAGE_BITS
);
3508 if (!(memory_region_is_ram(section
->mr
) && !section
->readonly
)) {
3509 if (todo
|| bounce
.buffer
) {
3512 bounce
.buffer
= qemu_memalign(TARGET_PAGE_SIZE
, TARGET_PAGE_SIZE
);
3516 cpu_physical_memory_read(addr
, bounce
.buffer
, l
);
3520 return bounce
.buffer
;
3523 raddr
= memory_region_get_ram_addr(section
->mr
)
3524 + memory_region_section_addr(section
, addr
);
3532 ret
= qemu_ram_ptr_length(raddr
, &rlen
);
3537 /* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3538 * Will also mark the memory as dirty if is_write == 1. access_len gives
3539 * the amount of memory that was actually read or written by the caller.
3541 void cpu_physical_memory_unmap(void *buffer
, target_phys_addr_t len
,
3542 int is_write
, target_phys_addr_t access_len
)
3544 if (buffer
!= bounce
.buffer
) {
3546 ram_addr_t addr1
= qemu_ram_addr_from_host_nofail(buffer
);
3547 while (access_len
) {
3549 l
= TARGET_PAGE_SIZE
;
3552 invalidate_and_set_dirty(addr1
, l
);
3557 if (xen_enabled()) {
3558 xen_invalidate_map_cache_entry(buffer
);
3563 cpu_physical_memory_write(bounce
.addr
, bounce
.buffer
, access_len
);
3565 qemu_vfree(bounce
.buffer
);
3566 bounce
.buffer
= NULL
;
3567 cpu_notify_map_clients();
3570 /* warning: addr must be aligned */
3571 static inline uint32_t ldl_phys_internal(target_phys_addr_t addr
,
3572 enum device_endian endian
)
3576 MemoryRegionSection
*section
;
3578 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3580 if (!(memory_region_is_ram(section
->mr
) ||
3581 memory_region_is_romd(section
->mr
))) {
3583 addr
= memory_region_section_addr(section
, addr
);
3584 val
= io_mem_read(section
->mr
, addr
, 4);
3585 #if defined(TARGET_WORDS_BIGENDIAN)
3586 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3590 if (endian
== DEVICE_BIG_ENDIAN
) {
3596 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3598 + memory_region_section_addr(section
, addr
));
3600 case DEVICE_LITTLE_ENDIAN
:
3601 val
= ldl_le_p(ptr
);
3603 case DEVICE_BIG_ENDIAN
:
3604 val
= ldl_be_p(ptr
);
3614 uint32_t ldl_phys(target_phys_addr_t addr
)
3616 return ldl_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3619 uint32_t ldl_le_phys(target_phys_addr_t addr
)
3621 return ldl_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3624 uint32_t ldl_be_phys(target_phys_addr_t addr
)
3626 return ldl_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3629 /* warning: addr must be aligned */
3630 static inline uint64_t ldq_phys_internal(target_phys_addr_t addr
,
3631 enum device_endian endian
)
3635 MemoryRegionSection
*section
;
3637 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3639 if (!(memory_region_is_ram(section
->mr
) ||
3640 memory_region_is_romd(section
->mr
))) {
3642 addr
= memory_region_section_addr(section
, addr
);
3644 /* XXX This is broken when device endian != cpu endian.
3645 Fix and add "endian" variable check */
3646 #ifdef TARGET_WORDS_BIGENDIAN
3647 val
= io_mem_read(section
->mr
, addr
, 4) << 32;
3648 val
|= io_mem_read(section
->mr
, addr
+ 4, 4);
3650 val
= io_mem_read(section
->mr
, addr
, 4);
3651 val
|= io_mem_read(section
->mr
, addr
+ 4, 4) << 32;
3655 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3657 + memory_region_section_addr(section
, addr
));
3659 case DEVICE_LITTLE_ENDIAN
:
3660 val
= ldq_le_p(ptr
);
3662 case DEVICE_BIG_ENDIAN
:
3663 val
= ldq_be_p(ptr
);
3673 uint64_t ldq_phys(target_phys_addr_t addr
)
3675 return ldq_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3678 uint64_t ldq_le_phys(target_phys_addr_t addr
)
3680 return ldq_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3683 uint64_t ldq_be_phys(target_phys_addr_t addr
)
3685 return ldq_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3689 uint32_t ldub_phys(target_phys_addr_t addr
)
3692 cpu_physical_memory_read(addr
, &val
, 1);
3696 /* warning: addr must be aligned */
3697 static inline uint32_t lduw_phys_internal(target_phys_addr_t addr
,
3698 enum device_endian endian
)
3702 MemoryRegionSection
*section
;
3704 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3706 if (!(memory_region_is_ram(section
->mr
) ||
3707 memory_region_is_romd(section
->mr
))) {
3709 addr
= memory_region_section_addr(section
, addr
);
3710 val
= io_mem_read(section
->mr
, addr
, 2);
3711 #if defined(TARGET_WORDS_BIGENDIAN)
3712 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3716 if (endian
== DEVICE_BIG_ENDIAN
) {
3722 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3724 + memory_region_section_addr(section
, addr
));
3726 case DEVICE_LITTLE_ENDIAN
:
3727 val
= lduw_le_p(ptr
);
3729 case DEVICE_BIG_ENDIAN
:
3730 val
= lduw_be_p(ptr
);
3740 uint32_t lduw_phys(target_phys_addr_t addr
)
3742 return lduw_phys_internal(addr
, DEVICE_NATIVE_ENDIAN
);
3745 uint32_t lduw_le_phys(target_phys_addr_t addr
)
3747 return lduw_phys_internal(addr
, DEVICE_LITTLE_ENDIAN
);
3750 uint32_t lduw_be_phys(target_phys_addr_t addr
)
3752 return lduw_phys_internal(addr
, DEVICE_BIG_ENDIAN
);
3755 /* warning: addr must be aligned. The ram page is not masked as dirty
3756 and the code inside is not invalidated. It is useful if the dirty
3757 bits are used to track modified PTEs */
3758 void stl_phys_notdirty(target_phys_addr_t addr
, uint32_t val
)
3761 MemoryRegionSection
*section
;
3763 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3765 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3766 addr
= memory_region_section_addr(section
, addr
);
3767 if (memory_region_is_ram(section
->mr
)) {
3768 section
= &phys_sections
[phys_section_rom
];
3770 io_mem_write(section
->mr
, addr
, val
, 4);
3772 unsigned long addr1
= (memory_region_get_ram_addr(section
->mr
)
3774 + memory_region_section_addr(section
, addr
);
3775 ptr
= qemu_get_ram_ptr(addr1
);
3778 if (unlikely(in_migration
)) {
3779 if (!cpu_physical_memory_is_dirty(addr1
)) {
3780 /* invalidate code */
3781 tb_invalidate_phys_page_range(addr1
, addr1
+ 4, 0);
3783 cpu_physical_memory_set_dirty_flags(
3784 addr1
, (0xff & ~CODE_DIRTY_FLAG
));
3790 void stq_phys_notdirty(target_phys_addr_t addr
, uint64_t val
)
3793 MemoryRegionSection
*section
;
3795 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3797 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3798 addr
= memory_region_section_addr(section
, addr
);
3799 if (memory_region_is_ram(section
->mr
)) {
3800 section
= &phys_sections
[phys_section_rom
];
3802 #ifdef TARGET_WORDS_BIGENDIAN
3803 io_mem_write(section
->mr
, addr
, val
>> 32, 4);
3804 io_mem_write(section
->mr
, addr
+ 4, (uint32_t)val
, 4);
3806 io_mem_write(section
->mr
, addr
, (uint32_t)val
, 4);
3807 io_mem_write(section
->mr
, addr
+ 4, val
>> 32, 4);
3810 ptr
= qemu_get_ram_ptr((memory_region_get_ram_addr(section
->mr
)
3812 + memory_region_section_addr(section
, addr
));
3817 /* warning: addr must be aligned */
3818 static inline void stl_phys_internal(target_phys_addr_t addr
, uint32_t val
,
3819 enum device_endian endian
)
3822 MemoryRegionSection
*section
;
3824 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3826 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3827 addr
= memory_region_section_addr(section
, addr
);
3828 if (memory_region_is_ram(section
->mr
)) {
3829 section
= &phys_sections
[phys_section_rom
];
3831 #if defined(TARGET_WORDS_BIGENDIAN)
3832 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3836 if (endian
== DEVICE_BIG_ENDIAN
) {
3840 io_mem_write(section
->mr
, addr
, val
, 4);
3842 unsigned long addr1
;
3843 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
3844 + memory_region_section_addr(section
, addr
);
3846 ptr
= qemu_get_ram_ptr(addr1
);
3848 case DEVICE_LITTLE_ENDIAN
:
3851 case DEVICE_BIG_ENDIAN
:
3858 invalidate_and_set_dirty(addr1
, 4);
3862 void stl_phys(target_phys_addr_t addr
, uint32_t val
)
3864 stl_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
3867 void stl_le_phys(target_phys_addr_t addr
, uint32_t val
)
3869 stl_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
3872 void stl_be_phys(target_phys_addr_t addr
, uint32_t val
)
3874 stl_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
3878 void stb_phys(target_phys_addr_t addr
, uint32_t val
)
3881 cpu_physical_memory_write(addr
, &v
, 1);
3884 /* warning: addr must be aligned */
3885 static inline void stw_phys_internal(target_phys_addr_t addr
, uint32_t val
,
3886 enum device_endian endian
)
3889 MemoryRegionSection
*section
;
3891 section
= phys_page_find(addr
>> TARGET_PAGE_BITS
);
3893 if (!memory_region_is_ram(section
->mr
) || section
->readonly
) {
3894 addr
= memory_region_section_addr(section
, addr
);
3895 if (memory_region_is_ram(section
->mr
)) {
3896 section
= &phys_sections
[phys_section_rom
];
3898 #if defined(TARGET_WORDS_BIGENDIAN)
3899 if (endian
== DEVICE_LITTLE_ENDIAN
) {
3903 if (endian
== DEVICE_BIG_ENDIAN
) {
3907 io_mem_write(section
->mr
, addr
, val
, 2);
3909 unsigned long addr1
;
3910 addr1
= (memory_region_get_ram_addr(section
->mr
) & TARGET_PAGE_MASK
)
3911 + memory_region_section_addr(section
, addr
);
3913 ptr
= qemu_get_ram_ptr(addr1
);
3915 case DEVICE_LITTLE_ENDIAN
:
3918 case DEVICE_BIG_ENDIAN
:
3925 invalidate_and_set_dirty(addr1
, 2);
3929 void stw_phys(target_phys_addr_t addr
, uint32_t val
)
3931 stw_phys_internal(addr
, val
, DEVICE_NATIVE_ENDIAN
);
3934 void stw_le_phys(target_phys_addr_t addr
, uint32_t val
)
3936 stw_phys_internal(addr
, val
, DEVICE_LITTLE_ENDIAN
);
3939 void stw_be_phys(target_phys_addr_t addr
, uint32_t val
)
3941 stw_phys_internal(addr
, val
, DEVICE_BIG_ENDIAN
);
3945 void stq_phys(target_phys_addr_t addr
, uint64_t val
)
3948 cpu_physical_memory_write(addr
, &val
, 8);
3951 void stq_le_phys(target_phys_addr_t addr
, uint64_t val
)
3953 val
= cpu_to_le64(val
);
3954 cpu_physical_memory_write(addr
, &val
, 8);
3957 void stq_be_phys(target_phys_addr_t addr
, uint64_t val
)
3959 val
= cpu_to_be64(val
);
3960 cpu_physical_memory_write(addr
, &val
, 8);
3963 /* virtual memory access for debug (includes writing to ROM) */
3964 int cpu_memory_rw_debug(CPUArchState
*env
, target_ulong addr
,
3965 uint8_t *buf
, int len
, int is_write
)
3968 target_phys_addr_t phys_addr
;
3972 page
= addr
& TARGET_PAGE_MASK
;
3973 phys_addr
= cpu_get_phys_page_debug(env
, page
);
3974 /* if no physical page mapped, return an error */
3975 if (phys_addr
== -1)
3977 l
= (page
+ TARGET_PAGE_SIZE
) - addr
;
3980 phys_addr
+= (addr
& ~TARGET_PAGE_MASK
);
3982 cpu_physical_memory_write_rom(phys_addr
, buf
, l
);
3984 cpu_physical_memory_rw(phys_addr
, buf
, l
, is_write
);
3993 /* in deterministic execution mode, instructions doing device I/Os
3994 must be at the end of the TB */
3995 void cpu_io_recompile(CPUArchState
*env
, uintptr_t retaddr
)
3997 TranslationBlock
*tb
;
3999 target_ulong pc
, cs_base
;
4002 tb
= tb_find_pc(retaddr
);
4004 cpu_abort(env
, "cpu_io_recompile: could not find TB for pc=%p",
4007 n
= env
->icount_decr
.u16
.low
+ tb
->icount
;
4008 cpu_restore_state(tb
, env
, retaddr
);
4009 /* Calculate how many instructions had been executed before the fault
4011 n
= n
- env
->icount_decr
.u16
.low
;
4012 /* Generate a new TB ending on the I/O insn. */
4014 /* On MIPS and SH, delay slot instructions can only be restarted if
4015 they were already the first instruction in the TB. If this is not
4016 the first instruction in a TB then re-execute the preceding
4018 #if defined(TARGET_MIPS)
4019 if ((env
->hflags
& MIPS_HFLAG_BMASK
) != 0 && n
> 1) {
4020 env
->active_tc
.PC
-= 4;
4021 env
->icount_decr
.u16
.low
++;
4022 env
->hflags
&= ~MIPS_HFLAG_BMASK
;
4024 #elif defined(TARGET_SH4)
4025 if ((env
->flags
& ((DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
))) != 0
4028 env
->icount_decr
.u16
.low
++;
4029 env
->flags
&= ~(DELAY_SLOT
| DELAY_SLOT_CONDITIONAL
);
4032 /* This should never happen. */
4033 if (n
> CF_COUNT_MASK
)
4034 cpu_abort(env
, "TB too big during recompile");
4036 cflags
= n
| CF_LAST_IO
;
4038 cs_base
= tb
->cs_base
;
4040 tb_phys_invalidate(tb
, -1);
4041 /* FIXME: In theory this could raise an exception. In practice
4042 we have already translated the block once so it's probably ok. */
4043 tb_gen_code(env
, pc
, cs_base
, flags
, cflags
);
4044 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
4045 the first in the TB) then we end up generating a whole new TB and
4046 repeating the fault, which is horribly inefficient.
4047 Better would be to execute just this insn uncached, or generate a
4049 cpu_resume_from_signal(env
, NULL
);
4052 #if !defined(CONFIG_USER_ONLY)
4054 void dump_exec_info(FILE *f
, fprintf_function cpu_fprintf
)
4056 int i
, target_code_size
, max_target_code_size
;
4057 int direct_jmp_count
, direct_jmp2_count
, cross_page
;
4058 TranslationBlock
*tb
;
4060 target_code_size
= 0;
4061 max_target_code_size
= 0;
4063 direct_jmp_count
= 0;
4064 direct_jmp2_count
= 0;
4065 for(i
= 0; i
< nb_tbs
; i
++) {
4067 target_code_size
+= tb
->size
;
4068 if (tb
->size
> max_target_code_size
)
4069 max_target_code_size
= tb
->size
;
4070 if (tb
->page_addr
[1] != -1)
4072 if (tb
->tb_next_offset
[0] != 0xffff) {
4074 if (tb
->tb_next_offset
[1] != 0xffff) {
4075 direct_jmp2_count
++;
4079 /* XXX: avoid using doubles ? */
4080 cpu_fprintf(f
, "Translation buffer state:\n");
4081 cpu_fprintf(f
, "gen code size %td/%ld\n",
4082 code_gen_ptr
- code_gen_buffer
, code_gen_buffer_max_size
);
4083 cpu_fprintf(f
, "TB count %d/%d\n",
4084 nb_tbs
, code_gen_max_blocks
);
4085 cpu_fprintf(f
, "TB avg target size %d max=%d bytes\n",
4086 nb_tbs
? target_code_size
/ nb_tbs
: 0,
4087 max_target_code_size
);
4088 cpu_fprintf(f
, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
4089 nb_tbs
? (code_gen_ptr
- code_gen_buffer
) / nb_tbs
: 0,
4090 target_code_size
? (double) (code_gen_ptr
- code_gen_buffer
) / target_code_size
: 0);
4091 cpu_fprintf(f
, "cross page TB count %d (%d%%)\n",
4093 nb_tbs
? (cross_page
* 100) / nb_tbs
: 0);
4094 cpu_fprintf(f
, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
4096 nb_tbs
? (direct_jmp_count
* 100) / nb_tbs
: 0,
4098 nb_tbs
? (direct_jmp2_count
* 100) / nb_tbs
: 0);
4099 cpu_fprintf(f
, "\nStatistics:\n");
4100 cpu_fprintf(f
, "TB flush count %d\n", tb_flush_count
);
4101 cpu_fprintf(f
, "TB invalidate count %d\n", tb_phys_invalidate_count
);
4102 cpu_fprintf(f
, "TLB flush count %d\n", tlb_flush_count
);
4103 tcg_dump_info(f
, cpu_fprintf
);
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}
4122 #ifndef CONFIG_USER_ONLY
4123 bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr
)
4125 MemoryRegionSection
*section
;
4127 section
= phys_page_find(phys_addr
>> TARGET_PAGE_BITS
);
4129 return !(memory_region_is_ram(section
->mr
) ||
4130 memory_region_is_romd(section
->mr
));