/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include "qemu-common.h"
#include "qemu-timer.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <machine/profile.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10
static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];

/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;
#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);

/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
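
/* Illustrative note (not in the original source): a page is identified by
 * index = addr >> TARGET_PAGE_BITS.  The top V_L1_BITS of that index select
 * an entry in l1_map; each following group of L2_BITS selects an entry in
 * one of the V_L1_SHIFT / L2_BITS lower-level tables, which is exactly the
 * walk performed by page_find_alloc() below.
 */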
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
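
/* Illustrative note (not in the original source): a PhysPageEntry either
 * points at another L2_SIZE-entry node in phys_map_nodes (!is_leaf) or at a
 * MemoryRegionSection in phys_sections (is_leaf).  PHYS_MAP_NODE_NIL marks
 * "nothing here yet", so a fresh phys_map resolves every address to the
 * unassigned section until phys_page_set() populates it.
 */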
static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;

static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
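
/* Hedged usage sketch (not from the original file): map_exec() is what makes
 * a generated-code region executable, e.g.
 *
 *     map_exec(code_gen_buffer, code_gen_buffer_size);
 *
 * The range is rounded out to host page boundaries because mprotect()
 * operates on whole pages.
 */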
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
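
/* Hedged usage sketch (not from the original file): callers look pages up by
 * target page index, e.g.
 *
 *     PageDesc *p = page_find(address >> TARGET_PAGE_BITS);
 *
 * page_find() never allocates, so a NULL result simply means the page was
 * never touched.
 */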
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
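
/* Illustrative note (not in the original source): every lookup walks at most
 * P_L2_LEVELS nodes, consuming L2_BITS of the page index per level; any
 * address that was never registered falls through to the
 * phys_section_unassigned entry instead of returning NULL.
 */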
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
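
/* Worked example (not in the original source): on an x86_64 host the cap is
 * 2 GiB because goto_tb uses direct branches with about +/- 2 GiB of reach,
 * so the default buffer is min(32 MiB, 2 GiB) = 32 MiB; on 32-bit ARM the
 * 16 MiB cap makes the default min(32 MiB, 16 MiB) = 16 MiB.
 */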
static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */
static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
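
/* Illustrative arithmetic (not in the original source): with the default
 * 32 MiB buffer the last 1024 bytes hold the TCG prologue/epilogue, so the
 * farthest direct branch from a TB to the prologue is still well inside the
 * host branch range enforced by MAX_CODE_GEN_BUFFER_SIZE above.
 */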
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}
void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
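
/* Hedged usage note (not from the original file): callers such as the
 * "nocache" execution path allocate a throwaway TB, run it once and free it
 * immediately; because that TB is the most recently generated one, the check
 * above simply rewinds code_gen_ptr instead of leaving a hole in the buffer.
 */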
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
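
/* Illustrative note (not in the original source): the bitmap has one bit per
 * byte of the page (TARGET_PAGE_SIZE / 8 bytes total).  A write of `len`
 * bytes at page offset `o` forces a TB invalidation only if one of bits
 * o..o+len-1 is set, which is the fast check performed by
 * tb_invalidate_phys_page_fast() below.
 */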
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
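
/* Worked example (not in the original source), assuming 4 KiB target pages:
 * invalidating [0x1ff0, 0x2010) first calls the per-page routine for the
 * page containing 0x1ff0, then rounds start down to 0x1000 and advances it
 * by one page, so the second iteration covers the page starting at 0x2000.
 */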
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
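
/* Worked example (not in the original source): len must be a power of two
 * and addr aligned to it.  With len = 4, len_mask = ~3, so addr & ~len_mask
 * must be 0: a watchpoint at addr = 0x1002 is rejected, while addr = 0x1004
 * is accepted.
 */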
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(env, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}

#endif /* CONFIG_USER_ONLY */
void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index.  */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
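
/* Illustrative note (not in the original source): two hash pages are cleared
 * because a TB whose first instruction lies on the page just before `addr`
 * can cross into the flushed page, so its cached entry must be discarded as
 * well as those of TBs starting on the flushed page itself.
 */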
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}
target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
                                                   MemoryRegionSection *section,
                                                   target_ulong vaddr,
                                                   target_phys_addr_t paddr,
                                                   int prot,
                                                   target_ulong *address)
{
    target_phys_addr_t iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */
#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    target_phys_addr_t base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(target_phys_addr_t base);
static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(void)
{
    destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}
static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}
static void register_subpage(MemoryRegionSection *section)
{
    subpage_t *subpage;
    target_phys_addr_t base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    target_phys_addr_t start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}
static void register_multipage(MemoryRegionSection *section)
{
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    target_phys_addr_t addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}
void cpu_register_physical_memory_log(MemoryRegionSection *section,
                                      bool readonly)
{
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(&now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(&now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(&now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(&now);
    }
}
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}
#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
    int flags;
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}
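
/* Illustrative sketch (not part of the original code): how find_ram_offset()
 * picks a gap.  Suppose the list holds two blocks, [0x0, 0x1000) and
 * [0x3000, 0x4000).  For a request of size 0x1000 the candidate gaps are
 * [0x1000, 0x3000) and [0x4000, RAM_ADDR_MAX); the smallest gap that still
 * fits wins, so the new block lands at 0x1000:
 *
 *     ram_addr_t off = find_ram_offset(0x1000);   // -> 0x1000
 *
 * last_ram_offset() would then report 0x4000, the end of the highest block.
 */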
static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    QemuOpts *machine_opts;
    int ret;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                            "but dump_guest_core=off specified\n");
        }
    }
}
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
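
/* Usage sketch (illustrative only; the region name and size are made up):
 * board and device models normally reach this path through the memory API,
 * e.g. memory_region_init_ram(), rather than calling qemu_ram_alloc()
 * directly:
 *
 *     MemoryRegion *ram = g_malloc(sizeof(*ram));
 *     memory_region_init_ram(ram, "example.ram", 64 * 1024 * 1024);
 *     // memory_region_init_ram() ends up in qemu_ram_alloc(size, mr),
 *     // which reserves a ram_addr_t range and host memory for the block.
 */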
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}
#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
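
/* Illustrative sketch (not in the original, variable names are hypothetical):
 * the length-aware variant clips the mapping at the end of the RAMBlock, so
 * callers must re-check the size on return:
 *
 *     ram_addr_t sz = 8192;
 *     void *p = qemu_ram_ptr_length(block_offset, &sz);
 *     // sz may have shrunk if the block ends within 8192 bytes
 *     memset(p, 0, sz);
 *     qemu_put_ram_ptr(p);   // currently only a tracepoint, kept for symmetry
 */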
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};
static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}
static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}
static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}
static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
};
static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
static void invalidate_and_set_dirty(target_phys_addr_t addr,
                                     target_phys_addr_t length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
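
/* Usage sketch (illustrative, hypothetical descriptor address and buffer):
 * device code usually goes through the convenience wrappers, which both call
 * cpu_physical_memory_rw():
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));   // is_write=0
 *     // ... modify desc ...
 *     cpu_physical_memory_write(desc_paddr, desc, sizeof(desc));  // is_write=1
 */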
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
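
/* Retry pattern sketch (illustrative; my_dma_retry and state are hypothetical
 * names): a DMA user whose map attempt failed (e.g. the bounce buffer was
 * busy) registers a callback and retries once cpu_notify_map_clients() fires:
 *
 *     static void my_dma_retry(void *opaque)
 *     {
 *         // re-issue cpu_physical_memory_map() for the pending transfer
 *     }
 *     ...
 *     client = cpu_register_map_client(state, my_dma_retry);
 */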
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
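
/* DMA usage sketch (illustrative, hypothetical addresses): map, touch, unmap.
 * Note that *plen may come back smaller than requested, and that is_write and
 * access_len must reflect what was actually done:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(guest_paddr, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */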
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}
/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
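
/* Illustrative pairing (not in the original; cfg_paddr is a hypothetical
 * guest-physical address): the fixed-endian helpers let a device access guest
 * memory in its bus byte order regardless of the target endianness:
 *
 *     stl_le_phys(cfg_paddr, 0x12345678);        // little-endian 32-bit store
 *     uint32_t v = ldl_le_phys(cfg_paddr);       // reads back 0x12345678
 *     stq_be_phys(cfg_paddr, UINT64_C(1) << 40); // big-endian 64-bit store
 */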
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif
#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}