git.proxmox.com Git - qemu.git/blame - exec.c
mips-linux-user: Always support rdhwr.
[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004 4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c 20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c 24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a 36#include "memory.h"
37#include "exec-memory.h"
53a5960a 38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1 40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c 55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
0cac1b66 60#include "cputlb.h"
61
67d95c15 62#define WANT_EXEC_OBSOLETE
63#include "exec-obsolete.h"
64
fd6ce8f6 65//#define DEBUG_TB_INVALIDATE
66e85a21 66//#define DEBUG_FLUSH
67d3b957 67//#define DEBUG_UNASSIGNED
fd6ce8f6 68
69/* make various TB consistency checks */
5fafdf24 70//#define DEBUG_TB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4 75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853 80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468 89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b 92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
6840981d 96#elif defined(_WIN32) && !defined(_WIN64)
f8e2af11 97#define code_gen_section \
98 __attribute__((aligned (16)))
d03d860b 99#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0 105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
26a5f13b 107/* threshold to flush the translated code buffer */
bdaf78e0 108static unsigned long code_gen_buffer_max_size;
24ab68ac 109static uint8_t *code_gen_ptr;
fd6ce8f6 110
e2eef170 111#if !defined(CONFIG_USER_ONLY)
9fa3e853 112int phys_ram_fd;
74576198 113static int in_migration;
94a6b54f 114
85d59fef 115RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a 116
117static MemoryRegion *system_memory;
309cb471 118static MemoryRegion *system_io;
62152b8a 119
0e0df1e2 120MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 121static MemoryRegion io_mem_subpage_ram;
0e0df1e2 122
e2eef170 123#endif
9fa3e853 124
9349b4f9 125CPUArchState *first_cpu;
6a00d601 126/* current CPU in the current thread. It is only valid inside
127 cpu_exec() */
9349b4f9 128DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 129/* 0 = Do not count executed instructions.
bf20dc07 130 1 = Precise instruction counting.
2e70f6ef 131 2 = Adaptive rate instruction counting. */
132int use_icount = 0;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853 137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004 144} PageDesc;
145
41c1b1c9 146/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6 147 while in user mode we want it to be based on virtual addresses. */
148#if !defined(CONFIG_USER_ONLY)
41c1b1c9 149#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151#else
5cd2c5b6 152# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 153#endif
bedb69ea 154#else
5cd2c5b6 155# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 156#endif
54936004 157
5cd2c5b6 158/* Size of the L2 (and L3, etc) page tables. */
159#define L2_BITS 10
54936004 160#define L2_SIZE (1 << L2_BITS)
161
3eef53df 162#define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
164
5cd2c5b6 165/* The bits remaining after N lower levels of page tables. */
5cd2c5b6 166#define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
5cd2c5b6 169#if V_L1_BITS_REM < 4
170#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171#else
172#define V_L1_BITS V_L1_BITS_REM
173#endif
174
5cd2c5b6 175#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176
5cd2c5b6 177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
c6d50674 179uintptr_t qemu_real_host_page_size;
180uintptr_t qemu_host_page_size;
181uintptr_t qemu_host_page_mask;
54936004 182
5cd2c5b6 183/* This is a multi-level map on the virtual address space.
184 The bottom level has pointers to PageDesc. */
185static void *l1_map[V_L1_SIZE];
54936004 186
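/* Address-to-PageDesc lookup (see page_find_alloc() below): the top
   V_L1_BITS of a page index select an l1_map[] slot, and each of the
   remaining V_L1_SHIFT / L2_BITS levels consumes L2_BITS more bits,
   ending at an array of L2_SIZE PageDesc entries. */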
e2eef170 187#if !defined(CONFIG_USER_ONLY)
4346ae3e 188typedef struct PhysPageEntry PhysPageEntry;
189
5312bd8b 190static MemoryRegionSection *phys_sections;
191static unsigned phys_sections_nb, phys_sections_nb_alloc;
192static uint16_t phys_section_unassigned;
aa102231 193static uint16_t phys_section_notdirty;
194static uint16_t phys_section_rom;
195static uint16_t phys_section_watch;
5312bd8b 196
4346ae3e 197struct PhysPageEntry {
07f07b31 198 uint16_t is_leaf : 1;
199 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
200 uint16_t ptr : 15;
4346ae3e 201};
202
d6f2ea22 203/* Simple allocator for PhysPageEntry nodes */
204static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
206
07f07b31 207#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 208
5cd2c5b6 209/* This is a multi-level map on the physical address space.
06ef3525 210 The bottom level has pointers to MemoryRegionSections. */
07f07b31 211static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
6d9a1304 212
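/* phys_map is the root of a radix tree over physical page numbers: when
   is_leaf is clear, ptr names a node in phys_map_nodes[]; when is_leaf is
   set, ptr is an index into phys_sections[].  An empty branch keeps
   ptr == PHYS_MAP_NODE_NIL and resolves to phys_section_unassigned in
   phys_page_find(). */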
e2eef170 213static void io_mem_init(void);
62152b8a 214static void memory_map_init(void);
e2eef170 215
1ec9b909 216static MemoryRegion io_mem_watch;
6658ffb8 217#endif
33417e70 218
34865134 219/* log support */
1e8b27ca 220#ifdef WIN32
221static const char *logfilename = "qemu.log";
222#else
d9b630fd 223static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 224#endif
34865134 225FILE *logfile;
226int loglevel;
e735b91c 227static int log_append = 0;
34865134 228
e3db7226 229/* statistics */
e3db7226 230static int tb_flush_count;
231static int tb_phys_invalidate_count;
232
7cb69cae 233#ifdef _WIN32
234static void map_exec(void *addr, long size)
235{
236 DWORD old_protect;
237 VirtualProtect(addr, size,
238 PAGE_EXECUTE_READWRITE, &old_protect);
239
240}
241#else
242static void map_exec(void *addr, long size)
243{
4369415f 244 unsigned long start, end, page_size;
7cb69cae 245
4369415f 246 page_size = getpagesize();
7cb69cae 247 start = (unsigned long)addr;
4369415f 248 start &= ~(page_size - 1);
7cb69cae
FB
249
250 end = (unsigned long)addr + size;
4369415f
FB
251 end += page_size - 1;
252 end &= ~(page_size - 1);
7cb69cae
FB
253
254 mprotect((void *)start, end - start,
255 PROT_READ | PROT_WRITE | PROT_EXEC);
256}
257#endif
258
b346ff46 259static void page_init(void)
54936004 260{
83fb7adf 261 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 262 TARGET_PAGE_SIZE */
c2b48b69
AL
263#ifdef _WIN32
264 {
265 SYSTEM_INFO system_info;
266
267 GetSystemInfo(&system_info);
268 qemu_real_host_page_size = system_info.dwPageSize;
269 }
270#else
271 qemu_real_host_page_size = getpagesize();
272#endif
83fb7adf
FB
273 if (qemu_host_page_size == 0)
274 qemu_host_page_size = qemu_real_host_page_size;
275 if (qemu_host_page_size < TARGET_PAGE_SIZE)
276 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 277 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 278
2e9a5713 279#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 280 {
f01576f1
JL
281#ifdef HAVE_KINFO_GETVMMAP
282 struct kinfo_vmentry *freep;
283 int i, cnt;
284
285 freep = kinfo_getvmmap(getpid(), &cnt);
286 if (freep) {
287 mmap_lock();
288 for (i = 0; i < cnt; i++) {
289 unsigned long startaddr, endaddr;
290
291 startaddr = freep[i].kve_start;
292 endaddr = freep[i].kve_end;
293 if (h2g_valid(startaddr)) {
294 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
295
296 if (h2g_valid(endaddr)) {
297 endaddr = h2g(endaddr);
fd436907 298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
299 } else {
300#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
301 endaddr = ~0ul;
fd436907 302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
303#endif
304 }
305 }
306 }
307 free(freep);
308 mmap_unlock();
309 }
310#else
50a9569b 311 FILE *f;
50a9569b 312
0776590d 313 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 314
fd436907 315 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 316 if (f) {
5cd2c5b6
RH
317 mmap_lock();
318
50a9569b 319 do {
5cd2c5b6
RH
320 unsigned long startaddr, endaddr;
321 int n;
322
323 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
324
325 if (n == 2 && h2g_valid(startaddr)) {
326 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
327
328 if (h2g_valid(endaddr)) {
329 endaddr = h2g(endaddr);
330 } else {
331 endaddr = ~0ul;
332 }
333 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
334 }
335 } while (!feof(f));
5cd2c5b6 336
50a9569b 337 fclose(f);
5cd2c5b6 338 mmap_unlock();
50a9569b 339 }
f01576f1 340#endif
50a9569b
AZ
341 }
342#endif
54936004
FB
343}
344
41c1b1c9 345static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 346{
41c1b1c9
PB
347 PageDesc *pd;
348 void **lp;
349 int i;
350
5cd2c5b6 351#if defined(CONFIG_USER_ONLY)
7267c094 352 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
353# define ALLOC(P, SIZE) \
354 do { \
355 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
356 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
357 } while (0)
358#else
359# define ALLOC(P, SIZE) \
7267c094 360 do { P = g_malloc0(SIZE); } while (0)
17e2377a 361#endif
434929bf 362
5cd2c5b6
RH
363 /* Level 1. Always allocated. */
364 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
365
366 /* Level 2..N-1. */
367 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
368 void **p = *lp;
369
370 if (p == NULL) {
371 if (!alloc) {
372 return NULL;
373 }
374 ALLOC(p, sizeof(void *) * L2_SIZE);
375 *lp = p;
17e2377a 376 }
5cd2c5b6
RH
377
378 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
379 }
380
381 pd = *lp;
382 if (pd == NULL) {
383 if (!alloc) {
384 return NULL;
385 }
386 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
387 *lp = pd;
54936004 388 }
5cd2c5b6
RH
389
390#undef ALLOC
5cd2c5b6
RH
391
392 return pd + (index & (L2_SIZE - 1));
54936004
FB
393}
394
41c1b1c9 395static inline PageDesc *page_find(tb_page_addr_t index)
54936004 396{
5cd2c5b6 397 return page_find_alloc(index, 0);
fd6ce8f6 398}
399
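/* Typical use (as in tb_alloc_page() and tb_invalidate_phys_page_range()
   below): look up the descriptor for a guest page and check whether any
   translated code lives there, e.g.
       PageDesc *p = page_find(addr >> TARGET_PAGE_BITS);
       if (p && p->first_tb) { /* translated code on this page */ }
   where 'addr' stands for any tb_page_addr_t. */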
6d9a1304 400#if !defined(CONFIG_USER_ONLY)
d6f2ea22 401
f7bf5461 402static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 403{
f7bf5461 404 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
405 typedef PhysPageEntry Node[L2_SIZE];
406 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
407 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
408 phys_map_nodes_nb + nodes);
d6f2ea22
AK
409 phys_map_nodes = g_renew(Node, phys_map_nodes,
410 phys_map_nodes_nb_alloc);
411 }
f7bf5461
AK
412}
413
414static uint16_t phys_map_node_alloc(void)
415{
416 unsigned i;
417 uint16_t ret;
418
419 ret = phys_map_nodes_nb++;
420 assert(ret != PHYS_MAP_NODE_NIL);
421 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 422 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 423 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 424 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 425 }
f7bf5461 426 return ret;
d6f2ea22
AK
427}
428
429static void phys_map_nodes_reset(void)
430{
431 phys_map_nodes_nb = 0;
432}
433
92e873b9 434
2999097b
AK
435static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
436 target_phys_addr_t *nb, uint16_t leaf,
437 int level)
f7bf5461
AK
438{
439 PhysPageEntry *p;
440 int i;
07f07b31 441 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
108c49b8 442
07f07b31 443 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
444 lp->ptr = phys_map_node_alloc();
445 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
446 if (level == 0) {
447 for (i = 0; i < L2_SIZE; i++) {
07f07b31 448 p[i].is_leaf = 1;
c19e8800 449 p[i].ptr = phys_section_unassigned;
4346ae3e 450 }
67c4d23c 451 }
f7bf5461 452 } else {
c19e8800 453 p = phys_map_nodes[lp->ptr];
92e873b9 454 }
2999097b 455 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 456
2999097b 457 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
458 if ((*index & (step - 1)) == 0 && *nb >= step) {
459 lp->is_leaf = true;
c19e8800 460 lp->ptr = leaf;
07f07b31
AK
461 *index += step;
462 *nb -= step;
2999097b
AK
463 } else {
464 phys_page_set_level(lp, index, nb, leaf, level - 1);
465 }
466 ++lp;
f7bf5461
AK
467 }
468}
469
2999097b 470static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
471 uint16_t leaf)
f7bf5461 472{
2999097b 473 /* Wildly overreserve - it doesn't matter much. */
07f07b31 474 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 475
2999097b 476 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9 477}
478
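/* phys_page_set() points the nb pages starting at page index 'index' at
   phys_sections[leaf], allocating intermediate nodes on demand; aligned
   runs that cover a whole subtree are recorded as a single leaf entry at
   the higher level (see the step test in phys_page_set_level()). */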
0cac1b66 479MemoryRegionSection *phys_page_find(target_phys_addr_t index)
92e873b9 480{
31ab2b4a 481 PhysPageEntry lp = phys_map;
482 PhysPageEntry *p;
483 int i;
31ab2b4a 484 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 485
07f07b31 486 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 487 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a 488 goto not_found;
489 }
c19e8800 490 p = phys_map_nodes[lp.ptr];
31ab2b4a 491 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 492 }
31ab2b4a 493
c19e8800 494 s_index = lp.ptr;
31ab2b4a 495not_found:
f3705d53 496 return &phys_sections[s_index];
497}
498
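/* phys_page_find() cannot fail: any branch that is still
   PHYS_MAP_NODE_NIL falls through to phys_section_unassigned, so callers
   always get a valid MemoryRegionSection pointer back. */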
e5548617 499bool memory_region_is_unassigned(MemoryRegion *mr)
500{
501 return mr != &io_mem_ram && mr != &io_mem_rom
502 && mr != &io_mem_notdirty && !mr->rom_device
503 && mr != &io_mem_watch;
504}
505
c8a706fe
PB
506#define mmap_lock() do { } while(0)
507#define mmap_unlock() do { } while(0)
9fa3e853 508#endif
fd6ce8f6 509
4369415f
FB
510#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
511
512#if defined(CONFIG_USER_ONLY)
ccbb4d44 513/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
514 user mode. It will change when a dedicated libc will be used */
515#define USE_STATIC_CODE_GEN_BUFFER
516#endif
517
518#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
519static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
520 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
521#endif
522
8fcd3692 523static void code_gen_alloc(unsigned long tb_size)
26a5f13b 524{
4369415f
FB
525#ifdef USE_STATIC_CODE_GEN_BUFFER
526 code_gen_buffer = static_code_gen_buffer;
527 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
528 map_exec(code_gen_buffer, code_gen_buffer_size);
529#else
26a5f13b
FB
530 code_gen_buffer_size = tb_size;
531 if (code_gen_buffer_size == 0) {
4369415f 532#if defined(CONFIG_USER_ONLY)
4369415f
FB
533 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
534#else
ccbb4d44 535 /* XXX: needs adjustments */
94a6b54f 536 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 537#endif
26a5f13b
FB
538 }
539 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
540 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
541 /* The code gen buffer location may have constraints depending on
542 the host cpu and OS */
543#if defined(__linux__)
544 {
545 int flags;
141ac468
BS
546 void *start = NULL;
547
26a5f13b
FB
548 flags = MAP_PRIVATE | MAP_ANONYMOUS;
549#if defined(__x86_64__)
550 flags |= MAP_32BIT;
551 /* Cannot map more than that */
552 if (code_gen_buffer_size > (800 * 1024 * 1024))
553 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
554#elif defined(__sparc_v9__)
555 // Map the buffer below 2G, so we can use direct calls and branches
556 flags |= MAP_FIXED;
557 start = (void *) 0x60000000UL;
558 if (code_gen_buffer_size > (512 * 1024 * 1024))
559 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 560#elif defined(__arm__)
5c84bd90 561 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
562 if (code_gen_buffer_size > 16 * 1024 * 1024)
563 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
564#elif defined(__s390x__)
565 /* Map the buffer so that we can use direct calls and branches. */
566 /* We have a +- 4GB range on the branches; leave some slop. */
567 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
568 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
569 }
570 start = (void *)0x90000000UL;
26a5f13b 571#endif
141ac468
BS
572 code_gen_buffer = mmap(start, code_gen_buffer_size,
573 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
574 flags, -1, 0);
575 if (code_gen_buffer == MAP_FAILED) {
576 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
577 exit(1);
578 }
579 }
cbb608a5 580#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
581 || defined(__DragonFly__) || defined(__OpenBSD__) \
582 || defined(__NetBSD__)
06e67a82
AL
583 {
584 int flags;
585 void *addr = NULL;
586 flags = MAP_PRIVATE | MAP_ANONYMOUS;
587#if defined(__x86_64__)
588 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
589 * 0x40000000 is free */
590 flags |= MAP_FIXED;
591 addr = (void *)0x40000000;
592 /* Cannot map more than that */
593 if (code_gen_buffer_size > (800 * 1024 * 1024))
594 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
595#elif defined(__sparc_v9__)
596 // Map the buffer below 2G, so we can use direct calls and branches
597 flags |= MAP_FIXED;
598 addr = (void *) 0x60000000UL;
599 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
600 code_gen_buffer_size = (512 * 1024 * 1024);
601 }
06e67a82
AL
602#endif
603 code_gen_buffer = mmap(addr, code_gen_buffer_size,
604 PROT_WRITE | PROT_READ | PROT_EXEC,
605 flags, -1, 0);
606 if (code_gen_buffer == MAP_FAILED) {
607 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
608 exit(1);
609 }
610 }
26a5f13b 611#else
7267c094 612 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
613 map_exec(code_gen_buffer, code_gen_buffer_size);
614#endif
4369415f 615#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 616 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a 617 code_gen_buffer_max_size = code_gen_buffer_size -
618 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 619 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 620 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b 621}
622
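/* code_gen_buffer_max_size keeps one worst-case TB's worth of room
   (TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes) free at the end of the buffer;
   tb_alloc() checks it, and nb_tbs against code_gen_max_blocks, before
   handing out a new TranslationBlock. */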
623/* Must be called before using the QEMU cpus. 'tb_size' is the size
624 (in bytes) allocated to the translation buffer. Zero means default
625 size. */
d5ab9713 626void tcg_exec_init(unsigned long tb_size)
26a5f13b 627{
26a5f13b
FB
628 cpu_gen_init();
629 code_gen_alloc(tb_size);
630 code_gen_ptr = code_gen_buffer;
813da627 631 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
4369415f 632 page_init();
9002ec79
RH
633#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
634 /* There's no guest base to take into account, so go ahead and
635 initialize the prologue now. */
636 tcg_prologue_init(&tcg_ctx);
637#endif
26a5f13b
FB
638}
639
d5ab9713
JK
640bool tcg_enabled(void)
641{
642 return code_gen_buffer != NULL;
643}
644
645void cpu_exec_init_all(void)
646{
647#if !defined(CONFIG_USER_ONLY)
648 memory_map_init();
649 io_mem_init();
650#endif
651}
652
9656f324
PB
653#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
654
e59fb374 655static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7 656{
9349b4f9 657 CPUArchState *env = opaque;
9656f324 658
3098dba0
AJ
659 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
660 version_id is increased. */
661 env->interrupt_request &= ~0x01;
9656f324
PB
662 tlb_flush(env, 1);
663
664 return 0;
665}
e7f4eff7
JQ
666
667static const VMStateDescription vmstate_cpu_common = {
668 .name = "cpu_common",
669 .version_id = 1,
670 .minimum_version_id = 1,
671 .minimum_version_id_old = 1,
e7f4eff7
JQ
672 .post_load = cpu_common_post_load,
673 .fields = (VMStateField []) {
9349b4f9
AF
674 VMSTATE_UINT32(halted, CPUArchState),
675 VMSTATE_UINT32(interrupt_request, CPUArchState),
e7f4eff7
JQ
676 VMSTATE_END_OF_LIST()
677 }
678};
9656f324
PB
679#endif
680
9349b4f9 681CPUArchState *qemu_get_cpu(int cpu)
950f1472 682{
9349b4f9 683 CPUArchState *env = first_cpu;
950f1472
GC
684
685 while (env) {
686 if (env->cpu_index == cpu)
687 break;
688 env = env->next_cpu;
689 }
690
691 return env;
692}
693
9349b4f9 694void cpu_exec_init(CPUArchState *env)
fd6ce8f6 695{
9349b4f9 696 CPUArchState **penv;
6a00d601
FB
697 int cpu_index;
698
c2764719
PB
699#if defined(CONFIG_USER_ONLY)
700 cpu_list_lock();
701#endif
6a00d601
FB
702 env->next_cpu = NULL;
703 penv = &first_cpu;
704 cpu_index = 0;
705 while (*penv != NULL) {
1e9fa730 706 penv = &(*penv)->next_cpu;
6a00d601
FB
707 cpu_index++;
708 }
709 env->cpu_index = cpu_index;
268a362c 710 env->numa_node = 0;
72cf2d4f
BS
711 QTAILQ_INIT(&env->breakpoints);
712 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
713#ifndef CONFIG_USER_ONLY
714 env->thread_id = qemu_get_thread_id();
715#endif
6a00d601 716 *penv = env;
c2764719
PB
717#if defined(CONFIG_USER_ONLY)
718 cpu_list_unlock();
719#endif
b3c7724c 720#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
721 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
722 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
723 cpu_save, cpu_load, env);
724#endif
fd6ce8f6
FB
725}
726
d1a1eb74
TG
727/* Allocate a new translation block. Flush the translation buffer if
728 too many translation blocks or too much generated code. */
729static TranslationBlock *tb_alloc(target_ulong pc)
730{
731 TranslationBlock *tb;
732
733 if (nb_tbs >= code_gen_max_blocks ||
734 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
735 return NULL;
736 tb = &tbs[nb_tbs++];
737 tb->pc = pc;
738 tb->cflags = 0;
739 return tb;
740}
741
742void tb_free(TranslationBlock *tb)
743{
744 /* In practice this is mostly used for single use temporary TB
745 Ignore the hard cases and just back up if this TB happens to
746 be the last one generated. */
747 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
748 code_gen_ptr = tb->tc_ptr;
749 nb_tbs--;
750 }
751}
752
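/* TBs are handed out sequentially from tbs[] and code_gen_buffer, so
   tb_free() can only give back the most recently generated block;
   everything else is reclaimed in bulk by tb_flush(). */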
9fa3e853 753static inline void invalidate_page_bitmap(PageDesc *p)
754{
755 if (p->code_bitmap) {
7267c094 756 g_free(p->code_bitmap);
9fa3e853
FB
757 p->code_bitmap = NULL;
758 }
759 p->code_write_count = 0;
760}
761
5cd2c5b6
RH
762/* Set to NULL all the 'first_tb' fields in all PageDescs. */
763
764static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 765{
5cd2c5b6 766 int i;
fd6ce8f6 767
5cd2c5b6
RH
768 if (*lp == NULL) {
769 return;
770 }
771 if (level == 0) {
772 PageDesc *pd = *lp;
7296abac 773 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
774 pd[i].first_tb = NULL;
775 invalidate_page_bitmap(pd + i);
fd6ce8f6 776 }
5cd2c5b6
RH
777 } else {
778 void **pp = *lp;
7296abac 779 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
780 page_flush_tb_1 (level - 1, pp + i);
781 }
782 }
783}
784
785static void page_flush_tb(void)
786{
787 int i;
788 for (i = 0; i < V_L1_SIZE; i++) {
789 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
790 }
791}
792
793/* flush all the translation blocks */
d4e8164f 794/* XXX: tb_flush is currently not thread safe */
9349b4f9 795void tb_flush(CPUArchState *env1)
fd6ce8f6 796{
9349b4f9 797 CPUArchState *env;
0124311e 798#if defined(DEBUG_FLUSH)
ab3d1727
BS
799 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
800 (unsigned long)(code_gen_ptr - code_gen_buffer),
801 nb_tbs, nb_tbs > 0 ?
802 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 803#endif
26a5f13b 804 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
805 cpu_abort(env1, "Internal error: code buffer overflow\n");
806
fd6ce8f6 807 nb_tbs = 0;
3b46e624 808
6a00d601
FB
809 for(env = first_cpu; env != NULL; env = env->next_cpu) {
810 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
811 }
9fa3e853 812
8a8a608f 813 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 814 page_flush_tb();
9fa3e853 815
fd6ce8f6 816 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
817 /* XXX: flush processor icache at this point if cache flush is
818 expensive */
e3db7226 819 tb_flush_count++;
fd6ce8f6
FB
820}
821
822#ifdef DEBUG_TB_CHECK
823
bc98a7ef 824static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
825{
826 TranslationBlock *tb;
827 int i;
828 address &= TARGET_PAGE_MASK;
99773bd4
PB
829 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
830 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
831 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
832 address >= tb->pc + tb->size)) {
0bf9e31a
BS
833 printf("ERROR invalidate: address=" TARGET_FMT_lx
834 " PC=%08lx size=%04x\n",
99773bd4 835 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
836 }
837 }
838 }
839}
840
841/* verify that all the pages have correct rights for code */
842static void tb_page_check(void)
843{
844 TranslationBlock *tb;
845 int i, flags1, flags2;
3b46e624 846
99773bd4
PB
847 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
848 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
849 flags1 = page_get_flags(tb->pc);
850 flags2 = page_get_flags(tb->pc + tb->size - 1);
851 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
852 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 853 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
854 }
855 }
856 }
857}
858
859#endif
860
861/* invalidate one TB */
862static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
863 int next_offset)
864{
865 TranslationBlock *tb1;
866 for(;;) {
867 tb1 = *ptb;
868 if (tb1 == tb) {
869 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
870 break;
871 }
872 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
873 }
874}
875
9fa3e853
FB
876static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
877{
878 TranslationBlock *tb1;
879 unsigned int n1;
880
881 for(;;) {
882 tb1 = *ptb;
8efe0ca8
SW
883 n1 = (uintptr_t)tb1 & 3;
884 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
9fa3e853
FB
885 if (tb1 == tb) {
886 *ptb = tb1->page_next[n1];
887 break;
888 }
889 ptb = &tb1->page_next[n1];
890 }
891}
892
d4e8164f
FB
893static inline void tb_jmp_remove(TranslationBlock *tb, int n)
894{
895 TranslationBlock *tb1, **ptb;
896 unsigned int n1;
897
898 ptb = &tb->jmp_next[n];
899 tb1 = *ptb;
900 if (tb1) {
901 /* find tb(n) in circular list */
902 for(;;) {
903 tb1 = *ptb;
8efe0ca8
SW
904 n1 = (uintptr_t)tb1 & 3;
905 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
d4e8164f
FB
906 if (n1 == n && tb1 == tb)
907 break;
908 if (n1 == 2) {
909 ptb = &tb1->jmp_first;
910 } else {
911 ptb = &tb1->jmp_next[n1];
912 }
913 }
914 /* now we can suppress tb(n) from the list */
915 *ptb = tb->jmp_next[n];
916
917 tb->jmp_next[n] = NULL;
918 }
919}
920
921/* reset the jump entry 'n' of a TB so that it is not chained to
922 another TB */
923static inline void tb_reset_jump(TranslationBlock *tb, int n)
924{
8efe0ca8 925 tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
d4e8164f
FB
926}
927
41c1b1c9 928void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 929{
9349b4f9 930 CPUArchState *env;
8a40a180 931 PageDesc *p;
d4e8164f 932 unsigned int h, n1;
41c1b1c9 933 tb_page_addr_t phys_pc;
8a40a180 934 TranslationBlock *tb1, *tb2;
3b46e624 935
8a40a180
FB
936 /* remove the TB from the hash list */
937 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
938 h = tb_phys_hash_func(phys_pc);
5fafdf24 939 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
940 offsetof(TranslationBlock, phys_hash_next));
941
942 /* remove the TB from the page list */
943 if (tb->page_addr[0] != page_addr) {
944 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
945 tb_page_remove(&p->first_tb, tb);
946 invalidate_page_bitmap(p);
947 }
948 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
949 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
950 tb_page_remove(&p->first_tb, tb);
951 invalidate_page_bitmap(p);
952 }
953
36bdbe54 954 tb_invalidated_flag = 1;
59817ccb 955
fd6ce8f6 956 /* remove the TB from the hash list */
8a40a180 957 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
958 for(env = first_cpu; env != NULL; env = env->next_cpu) {
959 if (env->tb_jmp_cache[h] == tb)
960 env->tb_jmp_cache[h] = NULL;
961 }
d4e8164f
FB
962
963 /* suppress this TB from the two jump lists */
964 tb_jmp_remove(tb, 0);
965 tb_jmp_remove(tb, 1);
966
967 /* suppress any remaining jumps to this TB */
968 tb1 = tb->jmp_first;
969 for(;;) {
8efe0ca8 970 n1 = (uintptr_t)tb1 & 3;
d4e8164f
FB
971 if (n1 == 2)
972 break;
8efe0ca8 973 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
d4e8164f
FB
974 tb2 = tb1->jmp_next[n1];
975 tb_reset_jump(tb1, n1);
976 tb1->jmp_next[n1] = NULL;
977 tb1 = tb2;
978 }
8efe0ca8 979 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */
9fa3e853 980
e3db7226 981 tb_phys_invalidate_count++;
9fa3e853
FB
982}
983
984static inline void set_bits(uint8_t *tab, int start, int len)
985{
986 int end, mask, end1;
987
988 end = start + len;
989 tab += start >> 3;
990 mask = 0xff << (start & 7);
991 if ((start & ~7) == (end & ~7)) {
992 if (start < end) {
993 mask &= ~(0xff << (end & 7));
994 *tab |= mask;
995 }
996 } else {
997 *tab++ |= mask;
998 start = (start + 8) & ~7;
999 end1 = end & ~7;
1000 while (start < end1) {
1001 *tab++ = 0xff;
1002 start += 8;
1003 }
1004 if (start < end) {
1005 mask = ~(0xff << (end & 7));
1006 *tab |= mask;
1007 }
1008 }
1009}
1010
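/* Worked example: on a zeroed bitmap, set_bits(tab, 3, 7) marks bits
   3..9, leaving tab[0] == 0xf8 and tab[1] == 0x03. */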
1011static void build_page_bitmap(PageDesc *p)
1012{
1013 int n, tb_start, tb_end;
1014 TranslationBlock *tb;
3b46e624 1015
7267c094 1016 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
1017
1018 tb = p->first_tb;
1019 while (tb != NULL) {
8efe0ca8
SW
1020 n = (uintptr_t)tb & 3;
1021 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
9fa3e853
FB
1022 /* NOTE: this is subtle as a TB may span two physical pages */
1023 if (n == 0) {
1024 /* NOTE: tb_end may be after the end of the page, but
1025 it is not a problem */
1026 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1027 tb_end = tb_start + tb->size;
1028 if (tb_end > TARGET_PAGE_SIZE)
1029 tb_end = TARGET_PAGE_SIZE;
1030 } else {
1031 tb_start = 0;
1032 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1033 }
1034 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1035 tb = tb->page_next[n];
1036 }
1037}
1038
9349b4f9 1039TranslationBlock *tb_gen_code(CPUArchState *env,
2e70f6ef
PB
1040 target_ulong pc, target_ulong cs_base,
1041 int flags, int cflags)
d720b93d
FB
1042{
1043 TranslationBlock *tb;
1044 uint8_t *tc_ptr;
41c1b1c9
PB
1045 tb_page_addr_t phys_pc, phys_page2;
1046 target_ulong virt_page2;
d720b93d
FB
1047 int code_gen_size;
1048
41c1b1c9 1049 phys_pc = get_page_addr_code(env, pc);
c27004ec 1050 tb = tb_alloc(pc);
d720b93d
FB
1051 if (!tb) {
1052 /* flush must be done */
1053 tb_flush(env);
1054 /* cannot fail at this point */
c27004ec 1055 tb = tb_alloc(pc);
2e70f6ef
PB
1056 /* Don't forget to invalidate previous TB info. */
1057 tb_invalidated_flag = 1;
d720b93d
FB
1058 }
1059 tc_ptr = code_gen_ptr;
1060 tb->tc_ptr = tc_ptr;
1061 tb->cs_base = cs_base;
1062 tb->flags = flags;
1063 tb->cflags = cflags;
d07bde88 1064 cpu_gen_code(env, tb, &code_gen_size);
8efe0ca8
SW
1065 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1066 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1067
d720b93d 1068 /* check next page if needed */
c27004ec 1069 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1070 phys_page2 = -1;
c27004ec 1071 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1072 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1073 }
41c1b1c9 1074 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1075 return tb;
d720b93d 1076}
3b46e624 1077
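/* A TB covers at most two guest pages: phys_page2 stays -1 when the
   translated code fits in one page, and tb_link_page() chains the TB
   into the PageDesc list of each page it touches. */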
77a8f1a5 1078/*
1079 * invalidate all TBs which intersect with the target physical pages
1080 * starting in range [start;end[. NOTE: start and end may refer to
1081 * different physical pages. 'is_cpu_write_access' should be true if called
1082 * from a real cpu write access: the virtual CPU will exit the current
1083 * TB if code is modified inside this TB.
1084 */
1085void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1086 int is_cpu_write_access)
1087{
1088 while (start < end) {
1089 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1090 start &= TARGET_PAGE_MASK;
1091 start += TARGET_PAGE_SIZE;
1092 }
1093}
1094
9fa3e853
FB
1095/* invalidate all TBs which intersect with the target physical page
1096 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1097 the same physical page. 'is_cpu_write_access' should be true if called
1098 from a real cpu write access: the virtual CPU will exit the current
1099 TB if code is modified inside this TB. */
41c1b1c9 1100void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1101 int is_cpu_write_access)
1102{
6b917547 1103 TranslationBlock *tb, *tb_next, *saved_tb;
9349b4f9 1104 CPUArchState *env = cpu_single_env;
41c1b1c9 1105 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1106 PageDesc *p;
1107 int n;
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 int current_tb_not_found = is_cpu_write_access;
1110 TranslationBlock *current_tb = NULL;
1111 int current_tb_modified = 0;
1112 target_ulong current_pc = 0;
1113 target_ulong current_cs_base = 0;
1114 int current_flags = 0;
1115#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1116
1117 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1118 if (!p)
9fa3e853 1119 return;
5fafdf24 1120 if (!p->code_bitmap &&
d720b93d
FB
1121 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1122 is_cpu_write_access) {
9fa3e853
FB
1123 /* build code bitmap */
1124 build_page_bitmap(p);
1125 }
1126
1127 /* we remove all the TBs in the range [start, end[ */
1128 /* XXX: see if in some cases it could be faster to invalidate all the code */
1129 tb = p->first_tb;
1130 while (tb != NULL) {
8efe0ca8
SW
1131 n = (uintptr_t)tb & 3;
1132 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
9fa3e853
FB
1133 tb_next = tb->page_next[n];
1134 /* NOTE: this is subtle as a TB may span two physical pages */
1135 if (n == 0) {
1136 /* NOTE: tb_end may be after the end of the page, but
1137 it is not a problem */
1138 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1139 tb_end = tb_start + tb->size;
1140 } else {
1141 tb_start = tb->page_addr[1];
1142 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1143 }
1144 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1145#ifdef TARGET_HAS_PRECISE_SMC
1146 if (current_tb_not_found) {
1147 current_tb_not_found = 0;
1148 current_tb = NULL;
2e70f6ef 1149 if (env->mem_io_pc) {
d720b93d 1150 /* now we have a real cpu fault */
2e70f6ef 1151 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1152 }
1153 }
1154 if (current_tb == tb &&
2e70f6ef 1155 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1156 /* If we are modifying the current TB, we must stop
1157 its execution. We could be more precise by checking
1158 that the modification is after the current PC, but it
1159 would require a specialized function to partially
1160 restore the CPU state */
3b46e624 1161
d720b93d 1162 current_tb_modified = 1;
618ba8e6 1163 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1164 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1165 &current_flags);
d720b93d
FB
1166 }
1167#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1168 /* we need to do that to handle the case where a signal
1169 occurs while doing tb_phys_invalidate() */
1170 saved_tb = NULL;
1171 if (env) {
1172 saved_tb = env->current_tb;
1173 env->current_tb = NULL;
1174 }
9fa3e853 1175 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1176 if (env) {
1177 env->current_tb = saved_tb;
1178 if (env->interrupt_request && env->current_tb)
1179 cpu_interrupt(env, env->interrupt_request);
1180 }
9fa3e853
FB
1181 }
1182 tb = tb_next;
1183 }
1184#if !defined(CONFIG_USER_ONLY)
1185 /* if no code remaining, no need to continue to use slow writes */
1186 if (!p->first_tb) {
1187 invalidate_page_bitmap(p);
d720b93d 1188 if (is_cpu_write_access) {
2e70f6ef 1189 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1190 }
1191 }
1192#endif
1193#ifdef TARGET_HAS_PRECISE_SMC
1194 if (current_tb_modified) {
1195 /* we generate a block containing just the instruction
1196 modifying the memory. It will ensure that it cannot modify
1197 itself */
ea1c1802 1198 env->current_tb = NULL;
2e70f6ef 1199 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1200 cpu_resume_from_signal(env, NULL);
9fa3e853 1201 }
fd6ce8f6 1202#endif
9fa3e853 1203}
fd6ce8f6 1204
9fa3e853 1205/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1206static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1207{
1208 PageDesc *p;
1209 int offset, b;
59817ccb 1210#if 0
a4193c8a 1211 if (1) {
93fcfe39
AL
1212 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1213 cpu_single_env->mem_io_vaddr, len,
1214 cpu_single_env->eip,
8efe0ca8
SW
1215 cpu_single_env->eip +
1216 (intptr_t)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1217 }
1218#endif
9fa3e853 1219 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1220 if (!p)
9fa3e853
FB
1221 return;
1222 if (p->code_bitmap) {
1223 offset = start & ~TARGET_PAGE_MASK;
1224 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1225 if (b & ((1 << len) - 1))
1226 goto do_invalidate;
1227 } else {
1228 do_invalidate:
d720b93d 1229 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1230 }
1231}
1232
9fa3e853 1233#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1234static void tb_invalidate_phys_page(tb_page_addr_t addr,
20503968 1235 uintptr_t pc, void *puc)
9fa3e853 1236{
6b917547 1237 TranslationBlock *tb;
9fa3e853 1238 PageDesc *p;
6b917547 1239 int n;
d720b93d 1240#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1241 TranslationBlock *current_tb = NULL;
9349b4f9 1242 CPUArchState *env = cpu_single_env;
6b917547
AL
1243 int current_tb_modified = 0;
1244 target_ulong current_pc = 0;
1245 target_ulong current_cs_base = 0;
1246 int current_flags = 0;
d720b93d 1247#endif
9fa3e853
FB
1248
1249 addr &= TARGET_PAGE_MASK;
1250 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1251 if (!p)
9fa3e853
FB
1252 return;
1253 tb = p->first_tb;
d720b93d
FB
1254#ifdef TARGET_HAS_PRECISE_SMC
1255 if (tb && pc != 0) {
1256 current_tb = tb_find_pc(pc);
1257 }
1258#endif
9fa3e853 1259 while (tb != NULL) {
8efe0ca8
SW
1260 n = (uintptr_t)tb & 3;
1261 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
d720b93d
FB
1262#ifdef TARGET_HAS_PRECISE_SMC
1263 if (current_tb == tb &&
2e70f6ef 1264 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1265 /* If we are modifying the current TB, we must stop
1266 its execution. We could be more precise by checking
1267 that the modification is after the current PC, but it
1268 would require a specialized function to partially
1269 restore the CPU state */
3b46e624 1270
d720b93d 1271 current_tb_modified = 1;
618ba8e6 1272 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1273 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1274 &current_flags);
d720b93d
FB
1275 }
1276#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1277 tb_phys_invalidate(tb, addr);
1278 tb = tb->page_next[n];
1279 }
fd6ce8f6 1280 p->first_tb = NULL;
d720b93d
FB
1281#ifdef TARGET_HAS_PRECISE_SMC
1282 if (current_tb_modified) {
1283 /* we generate a block containing just the instruction
1284 modifying the memory. It will ensure that it cannot modify
1285 itself */
ea1c1802 1286 env->current_tb = NULL;
2e70f6ef 1287 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1288 cpu_resume_from_signal(env, puc);
1289 }
1290#endif
fd6ce8f6 1291}
9fa3e853 1292#endif
fd6ce8f6
FB
1293
1294/* add the tb in the target page and protect it if necessary */
5fafdf24 1295static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1296 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1297{
1298 PageDesc *p;
4429ab44
JQ
1299#ifndef CONFIG_USER_ONLY
1300 bool page_already_protected;
1301#endif
9fa3e853
FB
1302
1303 tb->page_addr[n] = page_addr;
5cd2c5b6 1304 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1305 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1306#ifndef CONFIG_USER_ONLY
1307 page_already_protected = p->first_tb != NULL;
1308#endif
8efe0ca8 1309 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
9fa3e853 1310 invalidate_page_bitmap(p);
fd6ce8f6 1311
107db443 1312#if defined(TARGET_HAS_SMC) || 1
d720b93d 1313
9fa3e853 1314#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1315 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1316 target_ulong addr;
1317 PageDesc *p2;
9fa3e853
FB
1318 int prot;
1319
fd6ce8f6
FB
1320 /* force the host page as non writable (writes will have a
1321 page fault + mprotect overhead) */
53a5960a 1322 page_addr &= qemu_host_page_mask;
fd6ce8f6 1323 prot = 0;
53a5960a
PB
1324 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1325 addr += TARGET_PAGE_SIZE) {
1326
1327 p2 = page_find (addr >> TARGET_PAGE_BITS);
1328 if (!p2)
1329 continue;
1330 prot |= p2->flags;
1331 p2->flags &= ~PAGE_WRITE;
53a5960a 1332 }
5fafdf24 1333 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1334 (prot & PAGE_BITS) & ~PAGE_WRITE);
1335#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1336 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1337 page_addr);
fd6ce8f6 1338#endif
fd6ce8f6 1339 }
9fa3e853
FB
1340#else
1341 /* if some code is already present, then the pages are already
1342 protected. So we handle the case where only the first TB is
1343 allocated in a physical page */
4429ab44 1344 if (!page_already_protected) {
6a00d601 1345 tlb_protect_code(page_addr);
9fa3e853
FB
1346 }
1347#endif
d720b93d
FB
1348
1349#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1350}
1351
9fa3e853
FB
1352/* add a new TB and link it to the physical page tables. phys_page2 is
1353 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1354void tb_link_page(TranslationBlock *tb,
1355 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1356{
9fa3e853
FB
1357 unsigned int h;
1358 TranslationBlock **ptb;
1359
c8a706fe
PB
1360 /* Grab the mmap lock to stop another thread invalidating this TB
1361 before we are done. */
1362 mmap_lock();
9fa3e853
FB
1363 /* add in the physical hash table */
1364 h = tb_phys_hash_func(phys_pc);
1365 ptb = &tb_phys_hash[h];
1366 tb->phys_hash_next = *ptb;
1367 *ptb = tb;
fd6ce8f6
FB
1368
1369 /* add in the page list */
9fa3e853
FB
1370 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1371 if (phys_page2 != -1)
1372 tb_alloc_page(tb, 1, phys_page2);
1373 else
1374 tb->page_addr[1] = -1;
9fa3e853 1375
8efe0ca8 1376 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
d4e8164f
FB
1377 tb->jmp_next[0] = NULL;
1378 tb->jmp_next[1] = NULL;
1379
1380 /* init original jump addresses */
1381 if (tb->tb_next_offset[0] != 0xffff)
1382 tb_reset_jump(tb, 0);
1383 if (tb->tb_next_offset[1] != 0xffff)
1384 tb_reset_jump(tb, 1);
8a40a180
FB
1385
1386#ifdef DEBUG_TB_CHECK
1387 tb_page_check();
1388#endif
c8a706fe 1389 mmap_unlock();
fd6ce8f6
FB
1390}
1391
9fa3e853
FB
1392/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1393 tb[1].tc_ptr. Return NULL if not found */
6375e09e 1394TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
fd6ce8f6 1395{
9fa3e853 1396 int m_min, m_max, m;
8efe0ca8 1397 uintptr_t v;
9fa3e853 1398 TranslationBlock *tb;
a513fe19
FB
1399
1400 if (nb_tbs <= 0)
1401 return NULL;
8efe0ca8
SW
1402 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1403 tc_ptr >= (uintptr_t)code_gen_ptr) {
a513fe19 1404 return NULL;
8efe0ca8 1405 }
a513fe19
FB
1406 /* binary search (cf Knuth) */
1407 m_min = 0;
1408 m_max = nb_tbs - 1;
1409 while (m_min <= m_max) {
1410 m = (m_min + m_max) >> 1;
1411 tb = &tbs[m];
8efe0ca8 1412 v = (uintptr_t)tb->tc_ptr;
a513fe19
FB
1413 if (v == tc_ptr)
1414 return tb;
1415 else if (tc_ptr < v) {
1416 m_max = m - 1;
1417 } else {
1418 m_min = m + 1;
1419 }
5fafdf24 1420 }
a513fe19 1421 return &tbs[m_max];
1422}
7501267e 1423
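/* The binary search above relies on TBs being allocated in the same
   order their code is emitted, so tc_ptr values in tbs[] increase
   monotonically; the result is the TB whose generated code contains
   tc_ptr. */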
ea041c0e 1424static void tb_reset_jump_recursive(TranslationBlock *tb);
1425
1426static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1427{
1428 TranslationBlock *tb1, *tb_next, **ptb;
1429 unsigned int n1;
1430
1431 tb1 = tb->jmp_next[n];
1432 if (tb1 != NULL) {
1433 /* find head of list */
1434 for(;;) {
8efe0ca8
SW
1435 n1 = (uintptr_t)tb1 & 3;
1436 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
ea041c0e
FB
1437 if (n1 == 2)
1438 break;
1439 tb1 = tb1->jmp_next[n1];
1440 }
1441 /* we are now sure now that tb jumps to tb1 */
1442 tb_next = tb1;
1443
1444 /* remove tb from the jmp_first list */
1445 ptb = &tb_next->jmp_first;
1446 for(;;) {
1447 tb1 = *ptb;
8efe0ca8
SW
1448 n1 = (uintptr_t)tb1 & 3;
1449 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
ea041c0e
FB
1450 if (n1 == n && tb1 == tb)
1451 break;
1452 ptb = &tb1->jmp_next[n1];
1453 }
1454 *ptb = tb->jmp_next[n];
1455 tb->jmp_next[n] = NULL;
3b46e624 1456
ea041c0e
FB
1457 /* suppress the jump to next tb in generated code */
1458 tb_reset_jump(tb, n);
1459
0124311e 1460 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1461 tb_reset_jump_recursive(tb_next);
1462 }
1463}
1464
1465static void tb_reset_jump_recursive(TranslationBlock *tb)
1466{
1467 tb_reset_jump_recursive2(tb, 0);
1468 tb_reset_jump_recursive2(tb, 1);
1469}
1470
1fddef4b 1471#if defined(TARGET_HAS_ICE)
94df27fd 1472#if defined(CONFIG_USER_ONLY)
9349b4f9 1473static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
1474{
1475 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1476}
1477#else
1e7855a5 1478void tb_invalidate_phys_addr(target_phys_addr_t addr)
d720b93d 1479{
c227f099 1480 ram_addr_t ram_addr;
f3705d53 1481 MemoryRegionSection *section;
d720b93d 1482
06ef3525 1483 section = phys_page_find(addr >> TARGET_PAGE_BITS);
f3705d53
AK
1484 if (!(memory_region_is_ram(section->mr)
1485 || (section->mr->rom_device && section->mr->readable))) {
06ef3525
AK
1486 return;
1487 }
f3705d53 1488 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1489 + memory_region_section_addr(section, addr);
706cd4b5 1490 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1491}
1e7855a5
MF
1492
1493static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1494{
07ff3759
MF
1495 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1496 (pc & ~TARGET_PAGE_MASK));
1e7855a5 1497}
c27004ec 1498#endif
94df27fd 1499#endif /* TARGET_HAS_ICE */
d720b93d 1500
c527ee8f 1501#if defined(CONFIG_USER_ONLY)
9349b4f9 1502void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
1503
1504{
1505}
1506
9349b4f9 1507int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
1508 int flags, CPUWatchpoint **watchpoint)
1509{
1510 return -ENOSYS;
1511}
1512#else
6658ffb8 1513/* Add a watchpoint. */
9349b4f9 1514int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1515 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1516{
b4051334 1517 target_ulong len_mask = ~(len - 1);
c0ce998e 1518 CPUWatchpoint *wp;
6658ffb8 1519
b4051334 1520 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
1521 if ((len & (len - 1)) || (addr & ~len_mask) ||
1522 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
1523 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1524 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1525 return -EINVAL;
1526 }
7267c094 1527 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1528
1529 wp->vaddr = addr;
b4051334 1530 wp->len_mask = len_mask;
a1d1bb31
AL
1531 wp->flags = flags;
1532
2dc9f411 1533 /* keep all GDB-injected watchpoints in front */
c0ce998e 1534 if (flags & BP_GDB)
72cf2d4f 1535 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1536 else
72cf2d4f 1537 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1538
6658ffb8 1539 tlb_flush_page(env, addr);
a1d1bb31
AL
1540
1541 if (watchpoint)
1542 *watchpoint = wp;
1543 return 0;
6658ffb8 1544}
1545
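/* Illustrative call (BP_MEM_WRITE comes from the CPU headers, not this
   file): watch 4 aligned bytes for writes and keep the reference so the
   watchpoint can be dropped later with cpu_watchpoint_remove_by_ref():
       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE, &wp);
   'len' must be a power of two and 'addr' aligned to it, as checked
   above. */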
a1d1bb31 1546/* Remove a specific watchpoint. */
9349b4f9 1547int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1548 int flags)
6658ffb8 1549{
b4051334 1550 target_ulong len_mask = ~(len - 1);
a1d1bb31 1551 CPUWatchpoint *wp;
6658ffb8 1552
72cf2d4f 1553 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1554 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1555 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1556 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1557 return 0;
1558 }
1559 }
a1d1bb31 1560 return -ENOENT;
6658ffb8
PB
1561}
1562
a1d1bb31 1563/* Remove a specific watchpoint by reference. */
9349b4f9 1564void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 1565{
72cf2d4f 1566 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1567
a1d1bb31
AL
1568 tlb_flush_page(env, watchpoint->vaddr);
1569
7267c094 1570 g_free(watchpoint);
a1d1bb31
AL
1571}
1572
1573/* Remove all matching watchpoints. */
9349b4f9 1574void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 1575{
c0ce998e 1576 CPUWatchpoint *wp, *next;
a1d1bb31 1577
72cf2d4f 1578 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1579 if (wp->flags & mask)
1580 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1581 }
7d03f82f 1582}
c527ee8f 1583#endif
7d03f82f 1584
a1d1bb31 1585/* Add a breakpoint. */
9349b4f9 1586int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 1587 CPUBreakpoint **breakpoint)
4c3a88a2 1588{
1fddef4b 1589#if defined(TARGET_HAS_ICE)
c0ce998e 1590 CPUBreakpoint *bp;
3b46e624 1591
7267c094 1592 bp = g_malloc(sizeof(*bp));
4c3a88a2 1593
a1d1bb31
AL
1594 bp->pc = pc;
1595 bp->flags = flags;
1596
2dc9f411 1597 /* keep all GDB-injected breakpoints in front */
c0ce998e 1598 if (flags & BP_GDB)
72cf2d4f 1599 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1600 else
72cf2d4f 1601 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1602
d720b93d 1603 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1604
1605 if (breakpoint)
1606 *breakpoint = bp;
4c3a88a2
FB
1607 return 0;
1608#else
a1d1bb31 1609 return -ENOSYS;
4c3a88a2
FB
1610#endif
1611}
1612
a1d1bb31 1613/* Remove a specific breakpoint. */
9349b4f9 1614int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 1615{
7d03f82f 1616#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1617 CPUBreakpoint *bp;
1618
72cf2d4f 1619 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1620 if (bp->pc == pc && bp->flags == flags) {
1621 cpu_breakpoint_remove_by_ref(env, bp);
1622 return 0;
1623 }
7d03f82f 1624 }
a1d1bb31
AL
1625 return -ENOENT;
1626#else
1627 return -ENOSYS;
7d03f82f
EI
1628#endif
1629}
1630
a1d1bb31 1631/* Remove a specific breakpoint by reference. */
9349b4f9 1632void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1633{
1fddef4b 1634#if defined(TARGET_HAS_ICE)
72cf2d4f 1635 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1636
a1d1bb31
AL
1637 breakpoint_invalidate(env, breakpoint->pc);
1638
7267c094 1639 g_free(breakpoint);
a1d1bb31
AL
1640#endif
1641}
1642
1643/* Remove all matching breakpoints. */
9349b4f9 1644void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
1645{
1646#if defined(TARGET_HAS_ICE)
c0ce998e 1647 CPUBreakpoint *bp, *next;
a1d1bb31 1648
72cf2d4f 1649 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1650 if (bp->flags & mask)
1651 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1652 }
4c3a88a2
FB
1653#endif
1654}
1655
c33a346e
FB
1656/* enable or disable single step mode. EXCP_DEBUG is returned by the
1657 CPU loop after each instruction */
9349b4f9 1658void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 1659{
1fddef4b 1660#if defined(TARGET_HAS_ICE)
c33a346e
FB
1661 if (env->singlestep_enabled != enabled) {
1662 env->singlestep_enabled = enabled;
e22a25c9
AL
1663 if (kvm_enabled())
1664 kvm_update_guest_debug(env, 0);
1665 else {
ccbb4d44 1666 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1667 /* XXX: only flush what is necessary */
1668 tb_flush(env);
1669 }
c33a346e
FB
1670 }
1671#endif
1672}
1673
34865134
FB
1674/* enable or disable low levels log */
1675void cpu_set_log(int log_flags)
1676{
1677 loglevel = log_flags;
1678 if (loglevel && !logfile) {
11fcfab4 1679 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1680 if (!logfile) {
1681 perror(logfilename);
1682 _exit(1);
1683 }
9fa3e853
FB
1684#if !defined(CONFIG_SOFTMMU)
1685 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1686 {
b55266b5 1687 static char logfile_buf[4096];
9fa3e853
FB
1688 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1689 }
daf767b1
SW
1690#elif defined(_WIN32)
1691 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1692 setvbuf(logfile, NULL, _IONBF, 0);
1693#else
34865134 1694 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1695#endif
e735b91c
PB
1696 log_append = 1;
1697 }
1698 if (!loglevel && logfile) {
1699 fclose(logfile);
1700 logfile = NULL;
34865134
FB
1701 }
1702}
1703
1704void cpu_set_log_filename(const char *filename)
1705{
1706 logfilename = strdup(filename);
e735b91c
PB
1707 if (logfile) {
1708 fclose(logfile);
1709 logfile = NULL;
1710 }
1711 cpu_set_log(loglevel);
34865134 1712}
c33a346e 1713
9349b4f9 1714static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1715{
3098dba0
AJ
1716 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1717 problem and hope the cpu will stop of its own accord. For userspace
1718 emulation this often isn't actually as bad as it sounds. Often
1719 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1720 TranslationBlock *tb;
c227f099 1721 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1722
cab1b4bd 1723 spin_lock(&interrupt_lock);
3098dba0
AJ
1724 tb = env->current_tb;
1725 /* if the cpu is currently executing code, we must unlink it and
1726 all the potentially executing TBs */
f76cfe56 1727 if (tb) {
3098dba0
AJ
1728 env->current_tb = NULL;
1729 tb_reset_jump_recursive(tb);
be214e6c 1730 }
cab1b4bd 1731 spin_unlock(&interrupt_lock);
3098dba0
AJ
1732}
1733
97ffbd8d 1734#ifndef CONFIG_USER_ONLY
3098dba0 1735/* mask must never be zero, except for A20 change call */
9349b4f9 1736static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1737{
1738 int old_mask;
be214e6c 1739
2e70f6ef 1740 old_mask = env->interrupt_request;
68a79315 1741 env->interrupt_request |= mask;
3098dba0 1742
8edac960
AL
1743 /*
1744 * If called from iothread context, wake the target cpu in
1745 * case it's halted.
1746 */
b7680cb6 1747 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1748 qemu_cpu_kick(env);
1749 return;
1750 }
8edac960 1751
2e70f6ef 1752 if (use_icount) {
266910c4 1753 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1754 if (!can_do_io(env)
be214e6c 1755 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1756 cpu_abort(env, "Raised interrupt while not in I/O function");
1757 }
2e70f6ef 1758 } else {
3098dba0 1759 cpu_unlink_tb(env);
ea041c0e
FB
1760 }
1761}
1762
ec6959d0
JK
1763CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1764
97ffbd8d
JK
1765#else /* CONFIG_USER_ONLY */
1766
9349b4f9 1767void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1768{
1769 env->interrupt_request |= mask;
1770 cpu_unlink_tb(env);
1771}
1772#endif /* CONFIG_USER_ONLY */
1773
9349b4f9 1774void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1775{
1776 env->interrupt_request &= ~mask;
1777}
1778
9349b4f9 1779void cpu_exit(CPUArchState *env)
3098dba0
AJ
1780{
1781 env->exit_request = 1;
1782 cpu_unlink_tb(env);
1783}
1784
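For illustration only (this helper is not part of exec.c), a minimal sketch of how the interrupt helpers above pair up, assuming env points at a live CPUArchState inside QEMU:

/* Hypothetical helper, for illustration: raise the debug interrupt and
 * clear it again once it has been serviced. */
static void example_debug_kick(CPUArchState *env)
{
    cpu_interrupt(env, CPU_INTERRUPT_DEBUG);       /* set the bit, unlink the current TB */
    /* ... the CPU loop exits with EXCP_DEBUG and the caller handles it ... */
    cpu_reset_interrupt(env, CPU_INTERRUPT_DEBUG); /* drop the pending request */
}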
c7cd6a37 1785const CPULogItem cpu_log_items[] = {
5fafdf24 1786 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1787 "show generated host assembly code for each compiled TB" },
1788 { CPU_LOG_TB_IN_ASM, "in_asm",
1789 "show target assembly code for each compiled TB" },
5fafdf24 1790 { CPU_LOG_TB_OP, "op",
57fec1fe 1791 "show micro ops for each compiled TB" },
f193c797 1792 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1793 "show micro ops "
1794#ifdef TARGET_I386
1795 "before eflags optimization and "
f193c797 1796#endif
e01a1157 1797 "after liveness analysis" },
f193c797
FB
1798 { CPU_LOG_INT, "int",
1799 "show interrupts/exceptions in short format" },
1800 { CPU_LOG_EXEC, "exec",
1801 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1802 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1803 "show CPU state before block translation" },
f193c797
FB
1804#ifdef TARGET_I386
1805 { CPU_LOG_PCALL, "pcall",
1806 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1807 { CPU_LOG_RESET, "cpu_reset",
1808 "show CPU state before CPU resets" },
f193c797 1809#endif
8e3a9fd2 1810#ifdef DEBUG_IOPORT
fd872598
FB
1811 { CPU_LOG_IOPORT, "ioport",
1812 "show all i/o ports accesses" },
8e3a9fd2 1813#endif
f193c797
FB
1814 { 0, NULL, NULL },
1815};
1816
1817static int cmp1(const char *s1, int n, const char *s2)
1818{
1819 if (strlen(s2) != n)
1820 return 0;
1821 return memcmp(s1, s2, n) == 0;
1822}
3b46e624 1823
f193c797
FB
1824/* takes a comma-separated list of log masks. Returns 0 on error. */
1825int cpu_str_to_log_mask(const char *str)
1826{
c7cd6a37 1827 const CPULogItem *item;
f193c797
FB
1828 int mask;
1829 const char *p, *p1;
1830
1831 p = str;
1832 mask = 0;
1833 for(;;) {
1834 p1 = strchr(p, ',');
1835 if (!p1)
1836 p1 = p + strlen(p);
9742bf26
YT
1837 if(cmp1(p,p1-p,"all")) {
1838 for(item = cpu_log_items; item->mask != 0; item++) {
1839 mask |= item->mask;
1840 }
1841 } else {
1842 for(item = cpu_log_items; item->mask != 0; item++) {
1843 if (cmp1(p, p1 - p, item->name))
1844 goto found;
1845 }
1846 return 0;
f193c797 1847 }
f193c797
FB
1848 found:
1849 mask |= item->mask;
1850 if (*p1 != ',')
1851 break;
1852 p = p1 + 1;
1853 }
1854 return mask;
1855}
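For illustration only, a hedged sketch (not in the source) of how a front end might combine cpu_str_to_log_mask() and cpu_set_log(); the option string "in_asm,exec" is hypothetical:

static int example_enable_logging(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);  /* e.g. "in_asm,exec" */
    if (mask == 0) {
        return -1;                        /* unknown log item in the list */
    }
    cpu_set_log(mask);                    /* opens logfilename on first use */
    return 0;
}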
ea041c0e 1856
9349b4f9 1857void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1858{
1859 va_list ap;
493ae1f0 1860 va_list ap2;
7501267e
FB
1861
1862 va_start(ap, fmt);
493ae1f0 1863 va_copy(ap2, ap);
7501267e
FB
1864 fprintf(stderr, "qemu: fatal: ");
1865 vfprintf(stderr, fmt, ap);
1866 fprintf(stderr, "\n");
1867#ifdef TARGET_I386
7fe48483
FB
1868 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1869#else
1870 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1871#endif
93fcfe39
AL
1872 if (qemu_log_enabled()) {
1873 qemu_log("qemu: fatal: ");
1874 qemu_log_vprintf(fmt, ap2);
1875 qemu_log("\n");
f9373291 1876#ifdef TARGET_I386
93fcfe39 1877 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1878#else
93fcfe39 1879 log_cpu_state(env, 0);
f9373291 1880#endif
31b1a7b4 1881 qemu_log_flush();
93fcfe39 1882 qemu_log_close();
924edcae 1883 }
493ae1f0 1884 va_end(ap2);
f9373291 1885 va_end(ap);
fd052bf6
RV
1886#if defined(CONFIG_USER_ONLY)
1887 {
1888 struct sigaction act;
1889 sigfillset(&act.sa_mask);
1890 act.sa_handler = SIG_DFL;
1891 sigaction(SIGABRT, &act, NULL);
1892 }
1893#endif
7501267e
FB
1894 abort();
1895}
1896
9349b4f9 1897CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1898{
9349b4f9
AF
1899 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1900 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1901 int cpu_index = new_env->cpu_index;
5a38f081
AL
1902#if defined(TARGET_HAS_ICE)
1903 CPUBreakpoint *bp;
1904 CPUWatchpoint *wp;
1905#endif
1906
9349b4f9 1907 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1908
1909 /* Preserve chaining and index. */
c5be9f08
TS
1910 new_env->next_cpu = next_cpu;
1911 new_env->cpu_index = cpu_index;
5a38f081
AL
1912
1913 /* Clone all break/watchpoints.
1914 Note: Once we support ptrace with hw-debug register access, make sure
1915 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1916 QTAILQ_INIT(&env->breakpoints);
1917 QTAILQ_INIT(&env->watchpoints);
5a38f081 1918#if defined(TARGET_HAS_ICE)
72cf2d4f 1919 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1920 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1921 }
72cf2d4f 1922 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1923 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1924 wp->flags, NULL);
1925 }
1926#endif
1927
c5be9f08
TS
1928 return new_env;
1929}
1930
0124311e 1931#if !defined(CONFIG_USER_ONLY)
0cac1b66 1932void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1933{
1934 unsigned int i;
1935
1936 /* Discard jump cache entries for any tb which might potentially
1937 overlap the flushed page. */
1938 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1939 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1940 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1941
1942 i = tb_jmp_cache_hash_page(addr);
1943 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1944 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1945}
1946
5579c7f3 1947/* Note: start and end must be within the same ram block. */
c227f099 1948void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1949 int dirty_flags)
1ccde1cb 1950{
8efe0ca8 1951 uintptr_t length, start1;
1ccde1cb
FB
1952
1953 start &= TARGET_PAGE_MASK;
1954 end = TARGET_PAGE_ALIGN(end);
1955
1956 length = end - start;
1957 if (length == 0)
1958 return;
f7c11b53 1959 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1960
1ccde1cb
FB
1961 /* we modify the TLB cache so that the dirty bit will be set again
1962 when accessing the range */
8efe0ca8 1963 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
a57d23e4 1964 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1965 address comparisons below. */
8efe0ca8 1966 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1967 != (end - 1) - start) {
1968 abort();
1969 }
e5548617 1970 cpu_tlb_reset_dirty_all(start1, length);
1ccde1cb
FB
1971}
1972
74576198
AL
1973int cpu_physical_memory_set_dirty_tracking(int enable)
1974{
f6f3fbca 1975 int ret = 0;
74576198 1976 in_migration = enable;
f6f3fbca 1977 return ret;
74576198
AL
1978}
1979
e5548617
BS
1980target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1981 MemoryRegionSection *section,
1982 target_ulong vaddr,
1983 target_phys_addr_t paddr,
1984 int prot,
1985 target_ulong *address)
1986{
1987 target_phys_addr_t iotlb;
1988 CPUWatchpoint *wp;
1989
cc5bea60 1990 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1991 /* Normal RAM. */
1992 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1993 + memory_region_section_addr(section, paddr);
e5548617
BS
1994 if (!section->readonly) {
1995 iotlb |= phys_section_notdirty;
1996 } else {
1997 iotlb |= phys_section_rom;
1998 }
1999 } else {
2000 /* IO handlers are currently passed a physical address.
2001 It would be nice to pass an offset from the base address
2002 of that region. This would avoid having to special case RAM,
2003 and avoid full address decoding in every device.
2004 We can't use the high bits of pd for this because
2005 IO_MEM_ROMD uses these as a ram address. */
2006 iotlb = section - phys_sections;
cc5bea60 2007 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
2008 }
2009
2010 /* Make accesses to pages with watchpoints go via the
2011 watchpoint trap routines. */
2012 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2013 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2014 /* Avoid trapping reads of pages with a write breakpoint. */
2015 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2016 iotlb = phys_section_watch + paddr;
2017 *address |= TLB_MMIO;
2018 break;
2019 }
2020 }
2021 }
2022
2023 return iotlb;
2024}
2025
0124311e 2026#else
edf8e2af
MW
2027/*
2028 * Walks guest process memory "regions" one by one
2029 * and calls callback function 'fn' for each region.
2030 */
5cd2c5b6
RH
2031
2032struct walk_memory_regions_data
2033{
2034 walk_memory_regions_fn fn;
2035 void *priv;
8efe0ca8 2036 uintptr_t start;
5cd2c5b6
RH
2037 int prot;
2038};
2039
2040static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2041 abi_ulong end, int new_prot)
5cd2c5b6
RH
2042{
2043 if (data->start != -1ul) {
2044 int rc = data->fn(data->priv, data->start, end, data->prot);
2045 if (rc != 0) {
2046 return rc;
2047 }
2048 }
2049
2050 data->start = (new_prot ? end : -1ul);
2051 data->prot = new_prot;
2052
2053 return 0;
2054}
2055
2056static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2057 abi_ulong base, int level, void **lp)
5cd2c5b6 2058{
b480d9b7 2059 abi_ulong pa;
5cd2c5b6
RH
2060 int i, rc;
2061
2062 if (*lp == NULL) {
2063 return walk_memory_regions_end(data, base, 0);
2064 }
2065
2066 if (level == 0) {
2067 PageDesc *pd = *lp;
7296abac 2068 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2069 int prot = pd[i].flags;
2070
2071 pa = base | (i << TARGET_PAGE_BITS);
2072 if (prot != data->prot) {
2073 rc = walk_memory_regions_end(data, pa, prot);
2074 if (rc != 0) {
2075 return rc;
9fa3e853 2076 }
9fa3e853 2077 }
5cd2c5b6
RH
2078 }
2079 } else {
2080 void **pp = *lp;
7296abac 2081 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2082 pa = base | ((abi_ulong)i <<
2083 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2084 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2085 if (rc != 0) {
2086 return rc;
2087 }
2088 }
2089 }
2090
2091 return 0;
2092}
2093
2094int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2095{
2096 struct walk_memory_regions_data data;
8efe0ca8 2097 uintptr_t i;
5cd2c5b6
RH
2098
2099 data.fn = fn;
2100 data.priv = priv;
2101 data.start = -1ul;
2102 data.prot = 0;
2103
2104 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2105 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2106 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2107 if (rc != 0) {
2108 return rc;
9fa3e853 2109 }
33417e70 2110 }
5cd2c5b6
RH
2111
2112 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2113}
2114
b480d9b7
PB
2115static int dump_region(void *priv, abi_ulong start,
2116 abi_ulong end, unsigned long prot)
edf8e2af
MW
2117{
2118 FILE *f = (FILE *)priv;
2119
b480d9b7
PB
2120 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2121 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2122 start, end, end - start,
2123 ((prot & PAGE_READ) ? 'r' : '-'),
2124 ((prot & PAGE_WRITE) ? 'w' : '-'),
2125 ((prot & PAGE_EXEC) ? 'x' : '-'));
2126
2127 return (0);
2128}
2129
2130/* dump memory mappings */
2131void page_dump(FILE *f)
2132{
2133 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2134 "start", "end", "size", "prot");
2135 walk_memory_regions(f, dump_region);
33417e70
FB
2136}
2137
53a5960a 2138int page_get_flags(target_ulong address)
33417e70 2139{
9fa3e853
FB
2140 PageDesc *p;
2141
2142 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2143 if (!p)
9fa3e853
FB
2144 return 0;
2145 return p->flags;
2146}
2147
376a7909
RH
2148/* Modify the flags of a page and invalidate the code if necessary.
2149 The flag PAGE_WRITE_ORG is positioned automatically depending
2150 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2151void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2152{
376a7909
RH
2153 target_ulong addr, len;
2154
2155 /* This function should never be called with addresses outside the
2156 guest address space. If this assert fires, it probably indicates
2157 a missing call to h2g_valid. */
b480d9b7
PB
2158#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2159 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2160#endif
2161 assert(start < end);
9fa3e853
FB
2162
2163 start = start & TARGET_PAGE_MASK;
2164 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2165
2166 if (flags & PAGE_WRITE) {
9fa3e853 2167 flags |= PAGE_WRITE_ORG;
376a7909
RH
2168 }
2169
2170 for (addr = start, len = end - start;
2171 len != 0;
2172 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2173 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2174
2175 /* If the write protection bit is set, then we invalidate
2176 the code inside. */
5fafdf24 2177 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2178 (flags & PAGE_WRITE) &&
2179 p->first_tb) {
d720b93d 2180 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2181 }
2182 p->flags = flags;
2183 }
33417e70
FB
2184}
2185
3d97b40b
TS
2186int page_check_range(target_ulong start, target_ulong len, int flags)
2187{
2188 PageDesc *p;
2189 target_ulong end;
2190 target_ulong addr;
2191
376a7909
RH
2192 /* This function should never be called with addresses outside the
2193 guest address space. If this assert fires, it probably indicates
2194 a missing call to h2g_valid. */
338e9e6c
BS
2195#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2196 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2197#endif
2198
3e0650a9
RH
2199 if (len == 0) {
2200 return 0;
2201 }
376a7909
RH
2202 if (start + len - 1 < start) {
2203 /* We've wrapped around. */
55f280c9 2204 return -1;
376a7909 2205 }
55f280c9 2206
3d97b40b
TS
2207 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2208 start = start & TARGET_PAGE_MASK;
2209
376a7909
RH
2210 for (addr = start, len = end - start;
2211 len != 0;
2212 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2213 p = page_find(addr >> TARGET_PAGE_BITS);
2214 if( !p )
2215 return -1;
2216 if( !(p->flags & PAGE_VALID) )
2217 return -1;
2218
dae3270c 2219 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2220 return -1;
dae3270c
FB
2221 if (flags & PAGE_WRITE) {
2222 if (!(p->flags & PAGE_WRITE_ORG))
2223 return -1;
2224 /* unprotect the page if it was put read-only because it
2225 contains translated code */
2226 if (!(p->flags & PAGE_WRITE)) {
2227 if (!page_unprotect(addr, 0, NULL))
2228 return -1;
2229 }
2230 return 0;
2231 }
3d97b40b
TS
2232 }
2233 return 0;
2234}
2235
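Illustrative sketch (user-mode emulation, not part of the source): marking a guest range accessible and validating it before use. start/len and the helper name are hypothetical, and mmap_lock() is assumed to be held for page_set_flags():

static int example_map_guest_buffer(target_ulong start, target_ulong len)
{
    page_set_flags(start, start + len, PAGE_VALID | PAGE_READ | PAGE_WRITE);
    if (page_check_range(start, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1;   /* some page in the range is not accessible */
    }
    return 0;
}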
9fa3e853 2236/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2237 page. Return TRUE if the fault was successfully handled. */
6375e09e 2238int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2239{
45d679d6
AJ
2240 unsigned int prot;
2241 PageDesc *p;
53a5960a 2242 target_ulong host_start, host_end, addr;
9fa3e853 2243
c8a706fe
PB
2244 /* Technically this isn't safe inside a signal handler. However we
2245 know this only ever happens in a synchronous SEGV handler, so in
2246 practice it seems to be ok. */
2247 mmap_lock();
2248
45d679d6
AJ
2249 p = page_find(address >> TARGET_PAGE_BITS);
2250 if (!p) {
c8a706fe 2251 mmap_unlock();
9fa3e853 2252 return 0;
c8a706fe 2253 }
45d679d6 2254
9fa3e853
FB
2255 /* if the page was really writable, then we change its
2256 protection back to writable */
45d679d6
AJ
2257 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2258 host_start = address & qemu_host_page_mask;
2259 host_end = host_start + qemu_host_page_size;
2260
2261 prot = 0;
2262 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2263 p = page_find(addr >> TARGET_PAGE_BITS);
2264 p->flags |= PAGE_WRITE;
2265 prot |= p->flags;
2266
9fa3e853
FB
2267 /* and since the content will be modified, we must invalidate
2268 the corresponding translated code. */
45d679d6 2269 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2270#ifdef DEBUG_TB_CHECK
45d679d6 2271 tb_invalidate_check(addr);
9fa3e853 2272#endif
9fa3e853 2273 }
45d679d6
AJ
2274 mprotect((void *)g2h(host_start), qemu_host_page_size,
2275 prot & PAGE_BITS);
2276
2277 mmap_unlock();
2278 return 1;
9fa3e853 2279 }
c8a706fe 2280 mmap_unlock();
9fa3e853
FB
2281 return 0;
2282}
9fa3e853
FB
2283#endif /* defined(CONFIG_USER_ONLY) */
2284
e2eef170 2285#if !defined(CONFIG_USER_ONLY)
8da3ff18 2286
c04b2b78
PB
2287#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2288typedef struct subpage_t {
70c68e44 2289 MemoryRegion iomem;
c04b2b78 2290 target_phys_addr_t base;
5312bd8b 2291 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2292} subpage_t;
2293
c227f099 2294static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2295 uint16_t section);
0f0cb164 2296static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2297static void destroy_page_desc(uint16_t section_index)
54688b1e 2298{
5312bd8b
AK
2299 MemoryRegionSection *section = &phys_sections[section_index];
2300 MemoryRegion *mr = section->mr;
54688b1e
AK
2301
2302 if (mr->subpage) {
2303 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2304 memory_region_destroy(&subpage->iomem);
2305 g_free(subpage);
2306 }
2307}
2308
4346ae3e 2309static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2310{
2311 unsigned i;
d6f2ea22 2312 PhysPageEntry *p;
54688b1e 2313
c19e8800 2314 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2315 return;
2316 }
2317
c19e8800 2318 p = phys_map_nodes[lp->ptr];
4346ae3e 2319 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2320 if (!p[i].is_leaf) {
54688b1e 2321 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2322 } else {
c19e8800 2323 destroy_page_desc(p[i].ptr);
54688b1e 2324 }
54688b1e 2325 }
07f07b31 2326 lp->is_leaf = 0;
c19e8800 2327 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2328}
2329
2330static void destroy_all_mappings(void)
2331{
3eef53df 2332 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2333 phys_map_nodes_reset();
54688b1e
AK
2334}
2335
5312bd8b
AK
2336static uint16_t phys_section_add(MemoryRegionSection *section)
2337{
2338 if (phys_sections_nb == phys_sections_nb_alloc) {
2339 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2340 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2341 phys_sections_nb_alloc);
2342 }
2343 phys_sections[phys_sections_nb] = *section;
2344 return phys_sections_nb++;
2345}
2346
2347static void phys_sections_clear(void)
2348{
2349 phys_sections_nb = 0;
2350}
2351
8f2498f9
MT
2352/* register physical memory.
2353 For RAM, 'size' must be a multiple of the target page size.
2354 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2355 io memory page. The address used when calling the IO function is
2356 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2357 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2358 before calculating this offset. This should not be a problem unless
2359 the low bits of start_addr and region_offset differ. */
0f0cb164
AK
2360static void register_subpage(MemoryRegionSection *section)
2361{
2362 subpage_t *subpage;
2363 target_phys_addr_t base = section->offset_within_address_space
2364 & TARGET_PAGE_MASK;
f3705d53 2365 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2366 MemoryRegionSection subsection = {
2367 .offset_within_address_space = base,
2368 .size = TARGET_PAGE_SIZE,
2369 };
0f0cb164
AK
2370 target_phys_addr_t start, end;
2371
f3705d53 2372 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2373
f3705d53 2374 if (!(existing->mr->subpage)) {
0f0cb164
AK
2375 subpage = subpage_init(base);
2376 subsection.mr = &subpage->iomem;
2999097b
AK
2377 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2378 phys_section_add(&subsection));
0f0cb164 2379 } else {
f3705d53 2380 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2381 }
2382 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2383 end = start + section->size;
2384 subpage_register(subpage, start, end, phys_section_add(section));
2385}
2386
2387
2388static void register_multipage(MemoryRegionSection *section)
33417e70 2389{
dd81124b
AK
2390 target_phys_addr_t start_addr = section->offset_within_address_space;
2391 ram_addr_t size = section->size;
2999097b 2392 target_phys_addr_t addr;
5312bd8b 2393 uint16_t section_index = phys_section_add(section);
dd81124b 2394
3b8e6a2d 2395 assert(size);
f6f3fbca 2396
3b8e6a2d 2397 addr = start_addr;
2999097b
AK
2398 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2399 section_index);
33417e70
FB
2400}
2401
0f0cb164
AK
2402void cpu_register_physical_memory_log(MemoryRegionSection *section,
2403 bool readonly)
2404{
2405 MemoryRegionSection now = *section, remain = *section;
2406
2407 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2408 || (now.size < TARGET_PAGE_SIZE)) {
2409 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2410 - now.offset_within_address_space,
2411 now.size);
2412 register_subpage(&now);
2413 remain.size -= now.size;
2414 remain.offset_within_address_space += now.size;
2415 remain.offset_within_region += now.size;
2416 }
2417 now = remain;
2418 now.size &= TARGET_PAGE_MASK;
2419 if (now.size) {
2420 register_multipage(&now);
2421 remain.size -= now.size;
2422 remain.offset_within_address_space += now.size;
2423 remain.offset_within_region += now.size;
2424 }
2425 now = remain;
2426 if (now.size) {
2427 register_subpage(&now);
2428 }
2429}
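A worked example of the split performed above (illustrative, assuming 4 KiB target pages):

/*
 * section = { .offset_within_address_space = 0x1800, .size = 0x3000 }
 *   pass 1: head [0x1800, 0x2000)  -> register_subpage()
 *   pass 2: body [0x2000, 0x4000)  -> register_multipage()
 *   pass 3: tail [0x4000, 0x4800)  -> register_subpage()
 */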
2430
2431
c227f099 2432void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2433{
2434 if (kvm_enabled())
2435 kvm_coalesce_mmio_region(addr, size);
2436}
2437
c227f099 2438void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2439{
2440 if (kvm_enabled())
2441 kvm_uncoalesce_mmio_region(addr, size);
2442}
2443
62a2744c
SY
2444void qemu_flush_coalesced_mmio_buffer(void)
2445{
2446 if (kvm_enabled())
2447 kvm_flush_coalesced_mmio_buffer();
2448}
2449
c902760f
MT
2450#if defined(__linux__) && !defined(TARGET_S390X)
2451
2452#include <sys/vfs.h>
2453
2454#define HUGETLBFS_MAGIC 0x958458f6
2455
2456static long gethugepagesize(const char *path)
2457{
2458 struct statfs fs;
2459 int ret;
2460
2461 do {
9742bf26 2462 ret = statfs(path, &fs);
c902760f
MT
2463 } while (ret != 0 && errno == EINTR);
2464
2465 if (ret != 0) {
9742bf26
YT
2466 perror(path);
2467 return 0;
c902760f
MT
2468 }
2469
2470 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2471 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2472
2473 return fs.f_bsize;
2474}
2475
04b16653
AW
2476static void *file_ram_alloc(RAMBlock *block,
2477 ram_addr_t memory,
2478 const char *path)
c902760f
MT
2479{
2480 char *filename;
2481 void *area;
2482 int fd;
2483#ifdef MAP_POPULATE
2484 int flags;
2485#endif
2486 unsigned long hpagesize;
2487
2488 hpagesize = gethugepagesize(path);
2489 if (!hpagesize) {
9742bf26 2490 return NULL;
c902760f
MT
2491 }
2492
2493 if (memory < hpagesize) {
2494 return NULL;
2495 }
2496
2497 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2498 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2499 return NULL;
2500 }
2501
2502 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2503 return NULL;
c902760f
MT
2504 }
2505
2506 fd = mkstemp(filename);
2507 if (fd < 0) {
9742bf26
YT
2508 perror("unable to create backing store for hugepages");
2509 free(filename);
2510 return NULL;
c902760f
MT
2511 }
2512 unlink(filename);
2513 free(filename);
2514
2515 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2516
2517 /*
2518 * ftruncate is not supported by hugetlbfs in older
2519 * hosts, so don't bother bailing out on errors.
2520 * If anything goes wrong with it under other filesystems,
2521 * mmap will fail.
2522 */
2523 if (ftruncate(fd, memory))
9742bf26 2524 perror("ftruncate");
c902760f
MT
2525
2526#ifdef MAP_POPULATE
2527 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2528 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2529 * to sidestep this quirk.
2530 */
2531 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2532 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2533#else
2534 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2535#endif
2536 if (area == MAP_FAILED) {
9742bf26
YT
2537 perror("file_ram_alloc: can't mmap RAM pages");
2538 close(fd);
2539 return (NULL);
c902760f 2540 }
04b16653 2541 block->fd = fd;
c902760f
MT
2542 return area;
2543}
2544#endif
2545
d17b5288 2546static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2547{
2548 RAMBlock *block, *next_block;
3e837b2c 2549 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2550
2551 if (QLIST_EMPTY(&ram_list.blocks))
2552 return 0;
2553
2554 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2555 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2556
2557 end = block->offset + block->length;
2558
2559 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2560 if (next_block->offset >= end) {
2561 next = MIN(next, next_block->offset);
2562 }
2563 }
2564 if (next - end >= size && next - end < mingap) {
3e837b2c 2565 offset = end;
04b16653
AW
2566 mingap = next - end;
2567 }
2568 }
3e837b2c
AW
2569
2570 if (offset == RAM_ADDR_MAX) {
2571 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2572 (uint64_t)size);
2573 abort();
2574 }
2575
04b16653
AW
2576 return offset;
2577}
2578
2579static ram_addr_t last_ram_offset(void)
d17b5288
AW
2580{
2581 RAMBlock *block;
2582 ram_addr_t last = 0;
2583
2584 QLIST_FOREACH(block, &ram_list.blocks, next)
2585 last = MAX(last, block->offset + block->length);
2586
2587 return last;
2588}
2589
c5705a77 2590void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2591{
2592 RAMBlock *new_block, *block;
2593
c5705a77
AK
2594 new_block = NULL;
2595 QLIST_FOREACH(block, &ram_list.blocks, next) {
2596 if (block->offset == addr) {
2597 new_block = block;
2598 break;
2599 }
2600 }
2601 assert(new_block);
2602 assert(!new_block->idstr[0]);
84b89d78
CM
2603
2604 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2605 char *id = dev->parent_bus->info->get_dev_path(dev);
2606 if (id) {
2607 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2608 g_free(id);
84b89d78
CM
2609 }
2610 }
2611 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2612
2613 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2614 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2615 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2616 new_block->idstr);
2617 abort();
2618 }
2619 }
c5705a77
AK
2620}
2621
2622ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2623 MemoryRegion *mr)
2624{
2625 RAMBlock *new_block;
2626
2627 size = TARGET_PAGE_ALIGN(size);
2628 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2629
7c637366 2630 new_block->mr = mr;
432d268c 2631 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2632 if (host) {
2633 new_block->host = host;
cd19cfa2 2634 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2635 } else {
2636 if (mem_path) {
c902760f 2637#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2638 new_block->host = file_ram_alloc(new_block, size, mem_path);
2639 if (!new_block->host) {
2640 new_block->host = qemu_vmalloc(size);
e78815a5 2641 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2642 }
c902760f 2643#else
6977dfe6
YT
2644 fprintf(stderr, "-mem-path option unsupported\n");
2645 exit(1);
c902760f 2646#endif
6977dfe6 2647 } else {
6b02494d 2648#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2649 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2650 a system-defined value, which is at least 256GB. Larger systems
2651 have larger values. We put the guest between the end of data
2652 segment (system break) and this value. We use 32GB as a base to
2653 have enough room for the system break to grow. */
2654 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2655 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2656 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2657 if (new_block->host == MAP_FAILED) {
2658 fprintf(stderr, "Allocating RAM failed\n");
2659 abort();
2660 }
6b02494d 2661#else
868bb33f 2662 if (xen_enabled()) {
fce537d4 2663 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2664 } else {
2665 new_block->host = qemu_vmalloc(size);
2666 }
6b02494d 2667#endif
e78815a5 2668 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2669 }
c902760f 2670 }
94a6b54f
PB
2671 new_block->length = size;
2672
f471a17e 2673 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2674
7267c094 2675 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2676 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2677 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2678 0xff, size >> TARGET_PAGE_BITS);
2679
6f0437e8
JK
2680 if (kvm_enabled())
2681 kvm_setup_guest_memory(new_block->host, size);
2682
94a6b54f
PB
2683 return new_block->offset;
2684}
e9a1ab19 2685
c5705a77 2686ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2687{
c5705a77 2688 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2689}
2690
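Illustrative only: a hypothetical device backing a 1 MiB region with guest RAM. The helper and the idstr "example.vram" are not part of the source:

static ram_addr_t example_alloc_vram(MemoryRegion *mr)
{
    ram_addr_t offset = qemu_ram_alloc(1024 * 1024, mr);
    qemu_ram_set_idstr(offset, "example.vram", NULL);  /* name used for migration */
    return offset;
}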
1f2e98b6
AW
2691void qemu_ram_free_from_ptr(ram_addr_t addr)
2692{
2693 RAMBlock *block;
2694
2695 QLIST_FOREACH(block, &ram_list.blocks, next) {
2696 if (addr == block->offset) {
2697 QLIST_REMOVE(block, next);
7267c094 2698 g_free(block);
1f2e98b6
AW
2699 return;
2700 }
2701 }
2702}
2703
c227f099 2704void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2705{
04b16653
AW
2706 RAMBlock *block;
2707
2708 QLIST_FOREACH(block, &ram_list.blocks, next) {
2709 if (addr == block->offset) {
2710 QLIST_REMOVE(block, next);
cd19cfa2
HY
2711 if (block->flags & RAM_PREALLOC_MASK) {
2712 ;
2713 } else if (mem_path) {
04b16653
AW
2714#if defined (__linux__) && !defined(TARGET_S390X)
2715 if (block->fd) {
2716 munmap(block->host, block->length);
2717 close(block->fd);
2718 } else {
2719 qemu_vfree(block->host);
2720 }
fd28aa13
JK
2721#else
2722 abort();
04b16653
AW
2723#endif
2724 } else {
2725#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2726 munmap(block->host, block->length);
2727#else
868bb33f 2728 if (xen_enabled()) {
e41d7c69 2729 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2730 } else {
2731 qemu_vfree(block->host);
2732 }
04b16653
AW
2733#endif
2734 }
7267c094 2735 g_free(block);
04b16653
AW
2736 return;
2737 }
2738 }
2739
e9a1ab19
FB
2740}
2741
cd19cfa2
HY
2742#ifndef _WIN32
2743void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2744{
2745 RAMBlock *block;
2746 ram_addr_t offset;
2747 int flags;
2748 void *area, *vaddr;
2749
2750 QLIST_FOREACH(block, &ram_list.blocks, next) {
2751 offset = addr - block->offset;
2752 if (offset < block->length) {
2753 vaddr = block->host + offset;
2754 if (block->flags & RAM_PREALLOC_MASK) {
2755 ;
2756 } else {
2757 flags = MAP_FIXED;
2758 munmap(vaddr, length);
2759 if (mem_path) {
2760#if defined(__linux__) && !defined(TARGET_S390X)
2761 if (block->fd) {
2762#ifdef MAP_POPULATE
2763 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2764 MAP_PRIVATE;
2765#else
2766 flags |= MAP_PRIVATE;
2767#endif
2768 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2769 flags, block->fd, offset);
2770 } else {
2771 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2772 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2773 flags, -1, 0);
2774 }
fd28aa13
JK
2775#else
2776 abort();
cd19cfa2
HY
2777#endif
2778 } else {
2779#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2780 flags |= MAP_SHARED | MAP_ANONYMOUS;
2781 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2782 flags, -1, 0);
2783#else
2784 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2785 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2786 flags, -1, 0);
2787#endif
2788 }
2789 if (area != vaddr) {
f15fbc4b
AP
2790 fprintf(stderr, "Could not remap addr: "
2791 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2792 length, addr);
2793 exit(1);
2794 }
2795 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2796 }
2797 return;
2798 }
2799 }
2800}
2801#endif /* !_WIN32 */
2802
dc828ca1 2803/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2804 With the exception of the softmmu code in this file, this should
2805 only be used for local memory (e.g. video ram) that the device owns,
2806 and knows it isn't going to access beyond the end of the block.
2807
2808 It should not be used for general purpose DMA.
2809 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2810 */
c227f099 2811void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2812{
94a6b54f
PB
2813 RAMBlock *block;
2814
f471a17e
AW
2815 QLIST_FOREACH(block, &ram_list.blocks, next) {
2816 if (addr - block->offset < block->length) {
7d82af38
VP
2817 /* Move this entry to the start of the list. */
2818 if (block != QLIST_FIRST(&ram_list.blocks)) {
2819 QLIST_REMOVE(block, next);
2820 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2821 }
868bb33f 2822 if (xen_enabled()) {
432d268c
JN
2823 /* We need to check if the requested address is in the RAM
2824 * because we don't want to map the entire memory in QEMU.
712c2b41 2825 * In that case just map until the end of the page.
432d268c
JN
2826 */
2827 if (block->offset == 0) {
e41d7c69 2828 return xen_map_cache(addr, 0, 0);
432d268c 2829 } else if (block->host == NULL) {
e41d7c69
JK
2830 block->host =
2831 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2832 }
2833 }
f471a17e
AW
2834 return block->host + (addr - block->offset);
2835 }
94a6b54f 2836 }
f471a17e
AW
2837
2838 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2839 abort();
2840
2841 return NULL;
dc828ca1
PB
2842}
2843
b2e0a138
MT
2844/* Return a host pointer to ram allocated with qemu_ram_alloc.
2845 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2846 */
2847void *qemu_safe_ram_ptr(ram_addr_t addr)
2848{
2849 RAMBlock *block;
2850
2851 QLIST_FOREACH(block, &ram_list.blocks, next) {
2852 if (addr - block->offset < block->length) {
868bb33f 2853 if (xen_enabled()) {
432d268c
JN
2854 /* We need to check if the requested address is in the RAM
2855 * because we don't want to map the entire memory in QEMU.
712c2b41 2856 * In that case just map until the end of the page.
432d268c
JN
2857 */
2858 if (block->offset == 0) {
e41d7c69 2859 return xen_map_cache(addr, 0, 0);
432d268c 2860 } else if (block->host == NULL) {
e41d7c69
JK
2861 block->host =
2862 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2863 }
2864 }
b2e0a138
MT
2865 return block->host + (addr - block->offset);
2866 }
2867 }
2868
2869 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2870 abort();
2871
2872 return NULL;
2873}
2874
38bee5dc
SS
2875/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2876 * but takes a size argument */
8ab934f9 2877void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2878{
8ab934f9
SS
2879 if (*size == 0) {
2880 return NULL;
2881 }
868bb33f 2882 if (xen_enabled()) {
e41d7c69 2883 return xen_map_cache(addr, *size, 1);
868bb33f 2884 } else {
38bee5dc
SS
2885 RAMBlock *block;
2886
2887 QLIST_FOREACH(block, &ram_list.blocks, next) {
2888 if (addr - block->offset < block->length) {
2889 if (addr - block->offset + *size > block->length)
2890 *size = block->length - addr + block->offset;
2891 return block->host + (addr - block->offset);
2892 }
2893 }
2894
2895 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2896 abort();
38bee5dc
SS
2897 }
2898}
2899
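Illustrative sketch (not in the source) of the size-clamping contract: after the call, only *size bytes are guaranteed to lie within one RAMBlock:

static void example_zero_ram(ram_addr_t addr, ram_addr_t len)
{
    ram_addr_t size = len;
    void *host = qemu_ram_ptr_length(addr, &size);
    if (host) {
        memset(host, 0, size);   /* 'size' may have been clamped to the block end */
        qemu_put_ram_ptr(host);
    }
}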
050a0ddf
AP
2900void qemu_put_ram_ptr(void *addr)
2901{
2902 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2903}
2904
e890261f 2905int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2906{
94a6b54f
PB
2907 RAMBlock *block;
2908 uint8_t *host = ptr;
2909
868bb33f 2910 if (xen_enabled()) {
e41d7c69 2911 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2912 return 0;
2913 }
2914
f471a17e 2915 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
2916 /* This can happen when the block is not mapped. */
2917 if (block->host == NULL) {
2918 continue;
2919 }
f471a17e 2920 if (host - block->host < block->length) {
e890261f
MT
2921 *ram_addr = block->offset + (host - block->host);
2922 return 0;
f471a17e 2923 }
94a6b54f 2924 }
432d268c 2925
e890261f
MT
2926 return -1;
2927}
f471a17e 2928
e890261f
MT
2929/* Some of the softmmu routines need to translate from a host pointer
2930 (typically a TLB entry) back to a ram offset. */
2931ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2932{
2933 ram_addr_t ram_addr;
f471a17e 2934
e890261f
MT
2935 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2936 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2937 abort();
2938 }
2939 return ram_addr;
5579c7f3
PB
2940}
2941
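Illustrative only: round-tripping a RAM offset through its host pointer with the helpers above; the function itself is hypothetical:

static ram_addr_t example_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) < 0) {
        return RAM_ADDR_MAX;   /* pointer not inside any RAMBlock */
    }
    return back;               /* equals 'addr' for a mapped block */
}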
0e0df1e2
AK
2942static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2943 unsigned size)
e18231a3
BS
2944{
2945#ifdef DEBUG_UNASSIGNED
2946 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2947#endif
5b450407 2948#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2949 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2950#endif
2951 return 0;
2952}
2953
0e0df1e2
AK
2954static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2955 uint64_t val, unsigned size)
e18231a3
BS
2956{
2957#ifdef DEBUG_UNASSIGNED
0e0df1e2 2958 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2959#endif
5b450407 2960#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2961 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2962#endif
33417e70
FB
2963}
2964
0e0df1e2
AK
2965static const MemoryRegionOps unassigned_mem_ops = {
2966 .read = unassigned_mem_read,
2967 .write = unassigned_mem_write,
2968 .endianness = DEVICE_NATIVE_ENDIAN,
2969};
e18231a3 2970
0e0df1e2
AK
2971static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2972 unsigned size)
e18231a3 2973{
0e0df1e2 2974 abort();
e18231a3
BS
2975}
2976
0e0df1e2
AK
2977static void error_mem_write(void *opaque, target_phys_addr_t addr,
2978 uint64_t value, unsigned size)
e18231a3 2979{
0e0df1e2 2980 abort();
33417e70
FB
2981}
2982
0e0df1e2
AK
2983static const MemoryRegionOps error_mem_ops = {
2984 .read = error_mem_read,
2985 .write = error_mem_write,
2986 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2987};
2988
0e0df1e2
AK
2989static const MemoryRegionOps rom_mem_ops = {
2990 .read = error_mem_read,
2991 .write = unassigned_mem_write,
2992 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2993};
2994
0e0df1e2
AK
2995static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2996 uint64_t val, unsigned size)
9fa3e853 2997{
3a7d929e 2998 int dirty_flags;
f7c11b53 2999 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3000 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3001#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3002 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3003 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3004#endif
3a7d929e 3005 }
0e0df1e2
AK
3006 switch (size) {
3007 case 1:
3008 stb_p(qemu_get_ram_ptr(ram_addr), val);
3009 break;
3010 case 2:
3011 stw_p(qemu_get_ram_ptr(ram_addr), val);
3012 break;
3013 case 4:
3014 stl_p(qemu_get_ram_ptr(ram_addr), val);
3015 break;
3016 default:
3017 abort();
3a7d929e 3018 }
f23db169 3019 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3020 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3021 /* we remove the notdirty callback only if the code has been
3022 flushed */
3023 if (dirty_flags == 0xff)
2e70f6ef 3024 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3025}
3026
0e0df1e2
AK
3027static const MemoryRegionOps notdirty_mem_ops = {
3028 .read = error_mem_read,
3029 .write = notdirty_mem_write,
3030 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3031};
3032
0f459d16 3033/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3034static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3035{
9349b4f9 3036 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3037 target_ulong pc, cs_base;
3038 TranslationBlock *tb;
0f459d16 3039 target_ulong vaddr;
a1d1bb31 3040 CPUWatchpoint *wp;
06d55cc1 3041 int cpu_flags;
0f459d16 3042
06d55cc1
AL
3043 if (env->watchpoint_hit) {
3044 /* We re-entered the check after replacing the TB. Now raise
3045 * the debug interrupt so that it will trigger after the
3046 * current instruction. */
3047 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3048 return;
3049 }
2e70f6ef 3050 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3051 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3052 if ((vaddr == (wp->vaddr & len_mask) ||
3053 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3054 wp->flags |= BP_WATCHPOINT_HIT;
3055 if (!env->watchpoint_hit) {
3056 env->watchpoint_hit = wp;
3057 tb = tb_find_pc(env->mem_io_pc);
3058 if (!tb) {
3059 cpu_abort(env, "check_watchpoint: could not find TB for "
3060 "pc=%p", (void *)env->mem_io_pc);
3061 }
618ba8e6 3062 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3063 tb_phys_invalidate(tb, -1);
3064 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3065 env->exception_index = EXCP_DEBUG;
488d6577 3066 cpu_loop_exit(env);
6e140f28
AL
3067 } else {
3068 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3069 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3070 cpu_resume_from_signal(env, NULL);
6e140f28 3071 }
06d55cc1 3072 }
6e140f28
AL
3073 } else {
3074 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3075 }
3076 }
3077}
3078
6658ffb8
PB
3079/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3080 so these check for a hit then pass through to the normal out-of-line
3081 phys routines. */
1ec9b909
AK
3082static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3083 unsigned size)
6658ffb8 3084{
1ec9b909
AK
3085 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3086 switch (size) {
3087 case 1: return ldub_phys(addr);
3088 case 2: return lduw_phys(addr);
3089 case 4: return ldl_phys(addr);
3090 default: abort();
3091 }
6658ffb8
PB
3092}
3093
1ec9b909
AK
3094static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3095 uint64_t val, unsigned size)
6658ffb8 3096{
1ec9b909
AK
3097 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3098 switch (size) {
67364150
MF
3099 case 1:
3100 stb_phys(addr, val);
3101 break;
3102 case 2:
3103 stw_phys(addr, val);
3104 break;
3105 case 4:
3106 stl_phys(addr, val);
3107 break;
1ec9b909
AK
3108 default: abort();
3109 }
6658ffb8
PB
3110}
3111
1ec9b909
AK
3112static const MemoryRegionOps watch_mem_ops = {
3113 .read = watch_mem_read,
3114 .write = watch_mem_write,
3115 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3116};
6658ffb8 3117
70c68e44
AK
3118static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3119 unsigned len)
db7b5426 3120{
70c68e44 3121 subpage_t *mmio = opaque;
f6405247 3122 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3123 MemoryRegionSection *section;
db7b5426
BS
3124#if defined(DEBUG_SUBPAGE)
3125 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3126 mmio, len, addr, idx);
3127#endif
db7b5426 3128
5312bd8b
AK
3129 section = &phys_sections[mmio->sub_section[idx]];
3130 addr += mmio->base;
3131 addr -= section->offset_within_address_space;
3132 addr += section->offset_within_region;
37ec01d4 3133 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3134}
3135
70c68e44
AK
3136static void subpage_write(void *opaque, target_phys_addr_t addr,
3137 uint64_t value, unsigned len)
db7b5426 3138{
70c68e44 3139 subpage_t *mmio = opaque;
f6405247 3140 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3141 MemoryRegionSection *section;
db7b5426 3142#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3143 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3144 " idx %d value %"PRIx64"\n",
f6405247 3145 __func__, mmio, len, addr, idx, value);
db7b5426 3146#endif
f6405247 3147
5312bd8b
AK
3148 section = &phys_sections[mmio->sub_section[idx]];
3149 addr += mmio->base;
3150 addr -= section->offset_within_address_space;
3151 addr += section->offset_within_region;
37ec01d4 3152 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3153}
3154
70c68e44
AK
3155static const MemoryRegionOps subpage_ops = {
3156 .read = subpage_read,
3157 .write = subpage_write,
3158 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3159};
3160
de712f94
AK
3161static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3162 unsigned size)
56384e8b
AF
3163{
3164 ram_addr_t raddr = addr;
3165 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3166 switch (size) {
3167 case 1: return ldub_p(ptr);
3168 case 2: return lduw_p(ptr);
3169 case 4: return ldl_p(ptr);
3170 default: abort();
3171 }
56384e8b
AF
3172}
3173
de712f94
AK
3174static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3175 uint64_t value, unsigned size)
56384e8b
AF
3176{
3177 ram_addr_t raddr = addr;
3178 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3179 switch (size) {
3180 case 1: return stb_p(ptr, value);
3181 case 2: return stw_p(ptr, value);
3182 case 4: return stl_p(ptr, value);
3183 default: abort();
3184 }
56384e8b
AF
3185}
3186
de712f94
AK
3187static const MemoryRegionOps subpage_ram_ops = {
3188 .read = subpage_ram_read,
3189 .write = subpage_ram_write,
3190 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3191};
3192
c227f099 3193static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3194 uint16_t section)
db7b5426
BS
3195{
3196 int idx, eidx;
3197
3198 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3199 return -1;
3200 idx = SUBPAGE_IDX(start);
3201 eidx = SUBPAGE_IDX(end);
3202#if defined(DEBUG_SUBPAGE)
0bf9e31a 3203 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 3204 mmio, start, end, idx, eidx, section);
3205#endif
5312bd8b
AK
3206 if (memory_region_is_ram(phys_sections[section].mr)) {
3207 MemoryRegionSection new_section = phys_sections[section];
3208 new_section.mr = &io_mem_subpage_ram;
3209 section = phys_section_add(&new_section);
56384e8b 3210 }
db7b5426 3211 for (; idx <= eidx; idx++) {
5312bd8b 3212 mmio->sub_section[idx] = section;
db7b5426
BS
3213 }
3214
3215 return 0;
3216}
3217
0f0cb164 3218static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3219{
c227f099 3220 subpage_t *mmio;
db7b5426 3221
7267c094 3222 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3223
3224 mmio->base = base;
70c68e44
AK
3225 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3226 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3227 mmio->iomem.subpage = true;
db7b5426 3228#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 3229 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3230 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3231#endif
0f0cb164 3232 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3233
3234 return mmio;
3235}
3236
5312bd8b
AK
3237static uint16_t dummy_section(MemoryRegion *mr)
3238{
3239 MemoryRegionSection section = {
3240 .mr = mr,
3241 .offset_within_address_space = 0,
3242 .offset_within_region = 0,
3243 .size = UINT64_MAX,
3244 };
3245
3246 return phys_section_add(&section);
3247}
3248
37ec01d4 3249MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3250{
37ec01d4 3251 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3252}
3253
e9179ce1
AK
3254static void io_mem_init(void)
3255{
0e0df1e2 3256 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3257 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3258 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3259 "unassigned", UINT64_MAX);
3260 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3261 "notdirty", UINT64_MAX);
de712f94
AK
3262 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3263 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3264 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3265 "watch", UINT64_MAX);
e9179ce1
AK
3266}
3267
50c1e149
AK
3268static void core_begin(MemoryListener *listener)
3269{
54688b1e 3270 destroy_all_mappings();
5312bd8b 3271 phys_sections_clear();
c19e8800 3272 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3273 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3274 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3275 phys_section_rom = dummy_section(&io_mem_rom);
3276 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3277}
3278
3279static void core_commit(MemoryListener *listener)
3280{
9349b4f9 3281 CPUArchState *env;
117712c3
AK
3282
3283 /* since each CPU stores ram addresses in its TLB cache, we must
3284 reset the modified entries */
3285 /* XXX: slow ! */
3286 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3287 tlb_flush(env, 1);
3288 }
50c1e149
AK
3289}
3290
93632747
AK
3291static void core_region_add(MemoryListener *listener,
3292 MemoryRegionSection *section)
3293{
4855d41a 3294 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3295}
3296
3297static void core_region_del(MemoryListener *listener,
3298 MemoryRegionSection *section)
3299{
93632747
AK
3300}
3301
50c1e149
AK
3302static void core_region_nop(MemoryListener *listener,
3303 MemoryRegionSection *section)
3304{
54688b1e 3305 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3306}
3307
93632747
AK
3308static void core_log_start(MemoryListener *listener,
3309 MemoryRegionSection *section)
3310{
3311}
3312
3313static void core_log_stop(MemoryListener *listener,
3314 MemoryRegionSection *section)
3315{
3316}
3317
3318static void core_log_sync(MemoryListener *listener,
3319 MemoryRegionSection *section)
3320{
3321}
3322
3323static void core_log_global_start(MemoryListener *listener)
3324{
3325 cpu_physical_memory_set_dirty_tracking(1);
3326}
3327
3328static void core_log_global_stop(MemoryListener *listener)
3329{
3330 cpu_physical_memory_set_dirty_tracking(0);
3331}
3332
3333static void core_eventfd_add(MemoryListener *listener,
3334 MemoryRegionSection *section,
3335 bool match_data, uint64_t data, int fd)
3336{
3337}
3338
3339static void core_eventfd_del(MemoryListener *listener,
3340 MemoryRegionSection *section,
3341 bool match_data, uint64_t data, int fd)
3342{
3343}
3344
50c1e149
AK
3345static void io_begin(MemoryListener *listener)
3346{
3347}
3348
3349static void io_commit(MemoryListener *listener)
3350{
3351}
3352
4855d41a
AK
3353static void io_region_add(MemoryListener *listener,
3354 MemoryRegionSection *section)
3355{
a2d33521
AK
3356 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3357
3358 mrio->mr = section->mr;
3359 mrio->offset = section->offset_within_region;
3360 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3361 section->offset_within_address_space, section->size);
a2d33521 3362 ioport_register(&mrio->iorange);
4855d41a
AK
3363}
3364
3365static void io_region_del(MemoryListener *listener,
3366 MemoryRegionSection *section)
3367{
3368 isa_unassign_ioport(section->offset_within_address_space, section->size);
3369}
3370
50c1e149
AK
3371static void io_region_nop(MemoryListener *listener,
3372 MemoryRegionSection *section)
3373{
3374}
3375
4855d41a
AK
3376static void io_log_start(MemoryListener *listener,
3377 MemoryRegionSection *section)
3378{
3379}
3380
3381static void io_log_stop(MemoryListener *listener,
3382 MemoryRegionSection *section)
3383{
3384}
3385
3386static void io_log_sync(MemoryListener *listener,
3387 MemoryRegionSection *section)
3388{
3389}
3390
3391static void io_log_global_start(MemoryListener *listener)
3392{
3393}
3394
3395static void io_log_global_stop(MemoryListener *listener)
3396{
3397}
3398
3399static void io_eventfd_add(MemoryListener *listener,
3400 MemoryRegionSection *section,
3401 bool match_data, uint64_t data, int fd)
3402{
3403}
3404
3405static void io_eventfd_del(MemoryListener *listener,
3406 MemoryRegionSection *section,
3407 bool match_data, uint64_t data, int fd)
3408{
3409}
3410
93632747 3411static MemoryListener core_memory_listener = {
50c1e149
AK
3412 .begin = core_begin,
3413 .commit = core_commit,
93632747
AK
3414 .region_add = core_region_add,
3415 .region_del = core_region_del,
50c1e149 3416 .region_nop = core_region_nop,
93632747
AK
3417 .log_start = core_log_start,
3418 .log_stop = core_log_stop,
3419 .log_sync = core_log_sync,
3420 .log_global_start = core_log_global_start,
3421 .log_global_stop = core_log_global_stop,
3422 .eventfd_add = core_eventfd_add,
3423 .eventfd_del = core_eventfd_del,
3424 .priority = 0,
3425};
3426
4855d41a 3427static MemoryListener io_memory_listener = {
50c1e149
AK
3428 .begin = io_begin,
3429 .commit = io_commit,
4855d41a
AK
3430 .region_add = io_region_add,
3431 .region_del = io_region_del,
50c1e149 3432 .region_nop = io_region_nop,
4855d41a
AK
3433 .log_start = io_log_start,
3434 .log_stop = io_log_stop,
3435 .log_sync = io_log_sync,
3436 .log_global_start = io_log_global_start,
3437 .log_global_stop = io_log_global_stop,
3438 .eventfd_add = io_eventfd_add,
3439 .eventfd_del = io_eventfd_del,
3440 .priority = 0,
3441};
3442
62152b8a
AK
3443static void memory_map_init(void)
3444{
7267c094 3445 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3446 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3447 set_system_memory_map(system_memory);
309cb471 3448
7267c094 3449 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3450 memory_region_init(system_io, "io", 65536);
3451 set_system_io_map(system_io);
93632747 3452
4855d41a
AK
3453 memory_listener_register(&core_memory_listener, system_memory);
3454 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3455}
3456
3457MemoryRegion *get_system_memory(void)
3458{
3459 return system_memory;
3460}
3461
309cb471
AK
3462MemoryRegion *get_system_io(void)
3463{
3464 return system_io;
3465}
3466
e2eef170
PB
3467#endif /* !defined(CONFIG_USER_ONLY) */
3468
13eb76e0
FB
3469/* physical memory access (slow version, mainly for debug) */
3470#if defined(CONFIG_USER_ONLY)
9349b4f9 3471int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3472 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3473{
3474 int l, flags;
3475 target_ulong page;
53a5960a 3476 void * p;
13eb76e0
FB
3477
3478 while (len > 0) {
3479 page = addr & TARGET_PAGE_MASK;
3480 l = (page + TARGET_PAGE_SIZE) - addr;
3481 if (l > len)
3482 l = len;
3483 flags = page_get_flags(page);
3484 if (!(flags & PAGE_VALID))
a68fe89c 3485 return -1;
13eb76e0
FB
3486 if (is_write) {
3487 if (!(flags & PAGE_WRITE))
a68fe89c 3488 return -1;
579a97f7 3489 /* XXX: this code should not depend on lock_user */
72fb7daa 3490 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3491 return -1;
72fb7daa
AJ
3492 memcpy(p, buf, l);
3493 unlock_user(p, addr, l);
13eb76e0
FB
3494 } else {
3495 if (!(flags & PAGE_READ))
a68fe89c 3496 return -1;
579a97f7 3497 /* XXX: this code should not depend on lock_user */
72fb7daa 3498 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3499 return -1;
72fb7daa 3500 memcpy(buf, p, l);
5b257578 3501 unlock_user(p, addr, 0);
13eb76e0
FB
3502 }
3503 len -= l;
3504 buf += l;
3505 addr += l;
3506 }
a68fe89c 3507 return 0;
13eb76e0 3508}
8df1cd07 3509
13eb76e0 3510#else
c227f099 3511void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3512 int len, int is_write)
3513{
37ec01d4 3514 int l;
13eb76e0
FB
3515 uint8_t *ptr;
3516 uint32_t val;
c227f099 3517 target_phys_addr_t page;
f3705d53 3518 MemoryRegionSection *section;
3b46e624 3519
13eb76e0
FB
3520 while (len > 0) {
3521 page = addr & TARGET_PAGE_MASK;
3522 l = (page + TARGET_PAGE_SIZE) - addr;
3523 if (l > len)
3524 l = len;
06ef3525 3525 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3526
13eb76e0 3527 if (is_write) {
f3705d53 3528 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3529 target_phys_addr_t addr1;
cc5bea60 3530 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3531 /* XXX: could force cpu_single_env to NULL to avoid
3532 potential bugs */
6c2934db 3533 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3534 /* 32 bit write access */
c27004ec 3535 val = ldl_p(buf);
37ec01d4 3536 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3537 l = 4;
6c2934db 3538 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3539 /* 16 bit write access */
c27004ec 3540 val = lduw_p(buf);
37ec01d4 3541 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3542 l = 2;
3543 } else {
1c213d19 3544 /* 8 bit write access */
c27004ec 3545 val = ldub_p(buf);
37ec01d4 3546 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3547 l = 1;
3548 }
f3705d53 3549 } else if (!section->readonly) {
8ca5692d 3550 ram_addr_t addr1;
f3705d53 3551 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3552 + memory_region_section_addr(section, addr);
13eb76e0 3553 /* RAM case */
5579c7f3 3554 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3555 memcpy(ptr, buf, l);
3a7d929e
FB
3556 if (!cpu_physical_memory_is_dirty(addr1)) {
3557 /* invalidate code */
3558 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3559 /* set dirty bit */
f7c11b53
YT
3560 cpu_physical_memory_set_dirty_flags(
3561 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3562 }
050a0ddf 3563 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3564 }
3565 } else {
cc5bea60
BS
3566 if (!(memory_region_is_ram(section->mr) ||
3567 memory_region_is_romd(section->mr))) {
f1f6e3b8 3568 target_phys_addr_t addr1;
13eb76e0 3569 /* I/O case */
cc5bea60 3570 addr1 = memory_region_section_addr(section, addr);
6c2934db 3571 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3572 /* 32 bit read access */
37ec01d4 3573 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3574 stl_p(buf, val);
13eb76e0 3575 l = 4;
6c2934db 3576 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3577 /* 16 bit read access */
37ec01d4 3578 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3579 stw_p(buf, val);
13eb76e0
FB
3580 l = 2;
3581 } else {
1c213d19 3582 /* 8 bit read access */
37ec01d4 3583 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3584 stb_p(buf, val);
13eb76e0
FB
3585 l = 1;
3586 }
3587 } else {
3588 /* RAM case */
0a1b357f 3589 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3590 + memory_region_section_addr(section,
3591 addr));
f3705d53 3592 memcpy(buf, ptr, l);
050a0ddf 3593 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3594 }
3595 }
3596 len -= l;
3597 buf += l;
3598 addr += l;
3599 }
3600}
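cpu_physical_memory_rw() is the workhorse behind the cpu_physical_memory_read()/write() convenience wrappers used later in this file. A hedged usage sketch follows; the descriptor layout and address are invented for illustration:

/* Illustrative only, not part of exec.c: fetch a 16-byte descriptor from
 * guest-physical memory, set a completion flag, and write it back. */
static void example_patch_descriptor(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];

    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));  /* is_write == 0 */
    desc[0] |= 0x80;                             /* hypothetical "done" bit */
    cpu_physical_memory_write(desc_addr, desc, sizeof(desc)); /* is_write == 1 */
}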
8df1cd07 3601
d0ecd2aa 3602/* used for ROM loading: can write to both RAM and ROM */
c227f099 3603void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3604 const uint8_t *buf, int len)
3605{
3606 int l;
3607 uint8_t *ptr;
c227f099 3608 target_phys_addr_t page;
f3705d53 3609 MemoryRegionSection *section;
3b46e624 3610
d0ecd2aa
FB
3611 while (len > 0) {
3612 page = addr & TARGET_PAGE_MASK;
3613 l = (page + TARGET_PAGE_SIZE) - addr;
3614 if (l > len)
3615 l = len;
06ef3525 3616 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3617
cc5bea60
BS
3618 if (!(memory_region_is_ram(section->mr) ||
3619 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3620 /* do nothing */
3621 } else {
3622 unsigned long addr1;
f3705d53 3623 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3624 + memory_region_section_addr(section, addr);
d0ecd2aa 3625 /* ROM/RAM case */
5579c7f3 3626 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3627 memcpy(ptr, buf, l);
050a0ddf 3628 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3629 }
3630 len -= l;
3631 buf += l;
3632 addr += l;
3633 }
3634}
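Unlike cpu_physical_memory_rw(), the ROM variant above ignores the read-only attribute, which is exactly what firmware loaders need. A hedged sketch, with the load address invented:

/* Illustrative only, not part of exec.c: copy a firmware blob into a ROM
 * region at a made-up guest-physical address. */
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    const target_phys_addr_t rom_base = 0xfffc0000; /* hypothetical */

    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}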
3635
6d16c2f8
AL
3636typedef struct {
3637 void *buffer;
c227f099
AL
3638 target_phys_addr_t addr;
3639 target_phys_addr_t len;
6d16c2f8
AL
3640} BounceBuffer;
3641
3642static BounceBuffer bounce;
3643
ba223c29
AL
3644typedef struct MapClient {
3645 void *opaque;
3646 void (*callback)(void *opaque);
72cf2d4f 3647 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3648} MapClient;
3649
72cf2d4f
BS
3650static QLIST_HEAD(map_client_list, MapClient) map_client_list
3651 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3652
3653void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3654{
7267c094 3655 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3656
3657 client->opaque = opaque;
3658 client->callback = callback;
72cf2d4f 3659 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3660 return client;
3661}
3662
3663void cpu_unregister_map_client(void *_client)
3664{
3665 MapClient *client = (MapClient *)_client;
3666
72cf2d4f 3667 QLIST_REMOVE(client, link);
7267c094 3668 g_free(client);
ba223c29
AL
3669}
3670
3671static void cpu_notify_map_clients(void)
3672{
3673 MapClient *client;
3674
72cf2d4f
BS
3675 while (!QLIST_EMPTY(&map_client_list)) {
3676 client = QLIST_FIRST(&map_client_list);
ba223c29 3677 client->callback(client->opaque);
34d5e948 3678 cpu_unregister_map_client(client);
ba223c29
AL
3679 }
3680}
3681
6d16c2f8
AL
3682/* Map a guest physical memory region into the host virtual address space.
3683 * May map a subset of the requested range, given by and returned in *plen.
3684 * May return NULL if resources needed to perform the mapping are exhausted.
3685 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3686 * Use cpu_register_map_client() to know when retrying the map operation is
3687 * likely to succeed.
6d16c2f8 3688 */
c227f099
AL
3689void *cpu_physical_memory_map(target_phys_addr_t addr,
3690 target_phys_addr_t *plen,
6d16c2f8
AL
3691 int is_write)
3692{
c227f099 3693 target_phys_addr_t len = *plen;
38bee5dc 3694 target_phys_addr_t todo = 0;
6d16c2f8 3695 int l;
c227f099 3696 target_phys_addr_t page;
f3705d53 3697 MemoryRegionSection *section;
f15fbc4b 3698 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3699 ram_addr_t rlen;
3700 void *ret;
6d16c2f8
AL
3701
3702 while (len > 0) {
3703 page = addr & TARGET_PAGE_MASK;
3704 l = (page + TARGET_PAGE_SIZE) - addr;
3705 if (l > len)
3706 l = len;
06ef3525 3707 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3708
f3705d53 3709 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3710 if (todo || bounce.buffer) {
6d16c2f8
AL
3711 break;
3712 }
3713 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3714 bounce.addr = addr;
3715 bounce.len = l;
3716 if (!is_write) {
54f7b4a3 3717 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3718 }
38bee5dc
SS
3719
3720 *plen = l;
3721 return bounce.buffer;
6d16c2f8 3722 }
8ab934f9 3723 if (!todo) {
f3705d53 3724 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3725 + memory_region_section_addr(section, addr);
8ab934f9 3726 }
6d16c2f8
AL
3727
3728 len -= l;
3729 addr += l;
38bee5dc 3730 todo += l;
6d16c2f8 3731 }
8ab934f9
SS
3732 rlen = todo;
3733 ret = qemu_ram_ptr_length(raddr, &rlen);
3734 *plen = rlen;
3735 return ret;
6d16c2f8
AL
3736}
3737
3738/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3739 * Will also mark the memory as dirty if is_write == 1. access_len gives
3740 * the amount of memory that was actually read or written by the caller.
3741 */
c227f099
AL
3742void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3743 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3744{
3745 if (buffer != bounce.buffer) {
3746 if (is_write) {
e890261f 3747 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3748 while (access_len) {
3749 unsigned l;
3750 l = TARGET_PAGE_SIZE;
3751 if (l > access_len)
3752 l = access_len;
3753 if (!cpu_physical_memory_is_dirty(addr1)) {
3754 /* invalidate code */
3755 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3756 /* set dirty bit */
f7c11b53
YT
3757 cpu_physical_memory_set_dirty_flags(
3758 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3759 }
3760 addr1 += l;
3761 access_len -= l;
3762 }
3763 }
868bb33f 3764 if (xen_enabled()) {
e41d7c69 3765 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3766 }
6d16c2f8
AL
3767 return;
3768 }
3769 if (is_write) {
3770 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3771 }
f8a83245 3772 qemu_vfree(bounce.buffer);
6d16c2f8 3773 bounce.buffer = NULL;
ba223c29 3774 cpu_notify_map_clients();
6d16c2f8 3775}
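The map/unmap pair above gives device emulation zero-copy access to guest RAM, falling back to the single global bounce buffer for MMIO; cpu_register_map_client() is how a caller learns that the bounce buffer has been released and a retry is worthwhile. A hedged sketch of the usual pattern follows; example_dma_run() and example_dma_retry() are hypothetical names:

/* Illustrative only, not part of exec.c. */
static void example_dma_retry(void *opaque);

static void example_dma_run(void *opaque, target_phys_addr_t addr,
                            target_phys_addr_t size, int is_write)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, is_write);

    if (!host) {
        /* The bounce buffer is busy: ask to be called back when retrying
           is likely to succeed (see cpu_notify_map_clients() above). */
        cpu_register_map_client(opaque, example_dma_retry);
        return;
    }
    /* ... access host[0 .. plen-1]; plen may be smaller than size ... */
    cpu_physical_memory_unmap(host, plen, is_write, plen);
}

static void example_dma_retry(void *opaque)
{
    (void)opaque; /* real code would re-issue the transfer from its state */
}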
d0ecd2aa 3776
8df1cd07 3777/* warning: addr must be aligned */
1e78bcc1
AG
3778static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3779 enum device_endian endian)
8df1cd07 3780{
8df1cd07
FB
3781 uint8_t *ptr;
3782 uint32_t val;
f3705d53 3783 MemoryRegionSection *section;
8df1cd07 3784
06ef3525 3785 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3786
cc5bea60
BS
3787 if (!(memory_region_is_ram(section->mr) ||
3788 memory_region_is_romd(section->mr))) {
8df1cd07 3789 /* I/O case */
cc5bea60 3790 addr = memory_region_section_addr(section, addr);
37ec01d4 3791 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3792#if defined(TARGET_WORDS_BIGENDIAN)
3793 if (endian == DEVICE_LITTLE_ENDIAN) {
3794 val = bswap32(val);
3795 }
3796#else
3797 if (endian == DEVICE_BIG_ENDIAN) {
3798 val = bswap32(val);
3799 }
3800#endif
8df1cd07
FB
3801 } else {
3802 /* RAM case */
f3705d53 3803 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3804 & TARGET_PAGE_MASK)
cc5bea60 3805 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3806 switch (endian) {
3807 case DEVICE_LITTLE_ENDIAN:
3808 val = ldl_le_p(ptr);
3809 break;
3810 case DEVICE_BIG_ENDIAN:
3811 val = ldl_be_p(ptr);
3812 break;
3813 default:
3814 val = ldl_p(ptr);
3815 break;
3816 }
8df1cd07
FB
3817 }
3818 return val;
3819}
3820
1e78bcc1
AG
3821uint32_t ldl_phys(target_phys_addr_t addr)
3822{
3823 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3824}
3825
3826uint32_t ldl_le_phys(target_phys_addr_t addr)
3827{
3828 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3829}
3830
3831uint32_t ldl_be_phys(target_phys_addr_t addr)
3832{
3833 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3834}
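The _le/_be variants let device models read guest memory in the device's byte order regardless of the target CPU; a minimal hedged example, with the field offset invented:

/* Illustrative only, not part of exec.c: read a 32-bit little-endian field
 * out of a guest-physical descriptor. */
static uint32_t example_read_le_field(target_phys_addr_t desc_base)
{
    return ldl_le_phys(desc_base + 8); /* offset 8 is hypothetical */
}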
3835
84b7b8e7 3836/* warning: addr must be aligned */
1e78bcc1
AG
3837static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3838 enum device_endian endian)
84b7b8e7 3839{
84b7b8e7
FB
3840 uint8_t *ptr;
3841 uint64_t val;
f3705d53 3842 MemoryRegionSection *section;
84b7b8e7 3843
06ef3525 3844 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3845
cc5bea60
BS
3846 if (!(memory_region_is_ram(section->mr) ||
3847 memory_region_is_romd(section->mr))) {
84b7b8e7 3848 /* I/O case */
cc5bea60 3849 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3850
3851 /* XXX This is broken when device endian != cpu endian.
3852 Fix it and honour the "endian" argument, as the 32-bit loads do. */
84b7b8e7 3853#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3854 val = io_mem_read(section->mr, addr, 4) << 32;
3855 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3856#else
37ec01d4
AK
3857 val = io_mem_read(section->mr, addr, 4);
3858 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3859#endif
3860 } else {
3861 /* RAM case */
f3705d53 3862 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3863 & TARGET_PAGE_MASK)
cc5bea60 3864 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3865 switch (endian) {
3866 case DEVICE_LITTLE_ENDIAN:
3867 val = ldq_le_p(ptr);
3868 break;
3869 case DEVICE_BIG_ENDIAN:
3870 val = ldq_be_p(ptr);
3871 break;
3872 default:
3873 val = ldq_p(ptr);
3874 break;
3875 }
84b7b8e7
FB
3876 }
3877 return val;
3878}
3879
1e78bcc1
AG
3880uint64_t ldq_phys(target_phys_addr_t addr)
3881{
3882 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3883}
3884
3885uint64_t ldq_le_phys(target_phys_addr_t addr)
3886{
3887 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3888}
3889
3890uint64_t ldq_be_phys(target_phys_addr_t addr)
3891{
3892 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3893}
3894
aab33094 3895/* XXX: optimize */
c227f099 3896uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3897{
3898 uint8_t val;
3899 cpu_physical_memory_read(addr, &val, 1);
3900 return val;
3901}
3902
733f0b02 3903/* warning: addr must be aligned */
1e78bcc1
AG
3904static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3905 enum device_endian endian)
aab33094 3906{
733f0b02
MT
3907 uint8_t *ptr;
3908 uint64_t val;
f3705d53 3909 MemoryRegionSection *section;
733f0b02 3910
06ef3525 3911 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3912
cc5bea60
BS
3913 if (!(memory_region_is_ram(section->mr) ||
3914 memory_region_is_romd(section->mr))) {
733f0b02 3915 /* I/O case */
cc5bea60 3916 addr = memory_region_section_addr(section, addr);
37ec01d4 3917 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3918#if defined(TARGET_WORDS_BIGENDIAN)
3919 if (endian == DEVICE_LITTLE_ENDIAN) {
3920 val = bswap16(val);
3921 }
3922#else
3923 if (endian == DEVICE_BIG_ENDIAN) {
3924 val = bswap16(val);
3925 }
3926#endif
733f0b02
MT
3927 } else {
3928 /* RAM case */
f3705d53 3929 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3930 & TARGET_PAGE_MASK)
cc5bea60 3931 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3932 switch (endian) {
3933 case DEVICE_LITTLE_ENDIAN:
3934 val = lduw_le_p(ptr);
3935 break;
3936 case DEVICE_BIG_ENDIAN:
3937 val = lduw_be_p(ptr);
3938 break;
3939 default:
3940 val = lduw_p(ptr);
3941 break;
3942 }
733f0b02
MT
3943 }
3944 return val;
aab33094
FB
3945}
3946
1e78bcc1
AG
3947uint32_t lduw_phys(target_phys_addr_t addr)
3948{
3949 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3950}
3951
3952uint32_t lduw_le_phys(target_phys_addr_t addr)
3953{
3954 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3955}
3956
3957uint32_t lduw_be_phys(target_phys_addr_t addr)
3958{
3959 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3960}
3961
8df1cd07
FB
3962/* warning: addr must be aligned. The RAM page is not marked as dirty
3963 and the code inside is not invalidated. It is useful if the dirty
3964 bits are used to track modified PTEs */
c227f099 3965void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3966{
8df1cd07 3967 uint8_t *ptr;
f3705d53 3968 MemoryRegionSection *section;
8df1cd07 3969
06ef3525 3970 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3971
f3705d53 3972 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3973 addr = memory_region_section_addr(section, addr);
f3705d53 3974 if (memory_region_is_ram(section->mr)) {
37ec01d4 3975 section = &phys_sections[phys_section_rom];
06ef3525 3976 }
37ec01d4 3977 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3978 } else {
f3705d53 3979 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3980 & TARGET_PAGE_MASK)
cc5bea60 3981 + memory_region_section_addr(section, addr);
5579c7f3 3982 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3983 stl_p(ptr, val);
74576198
AL
3984
3985 if (unlikely(in_migration)) {
3986 if (!cpu_physical_memory_is_dirty(addr1)) {
3987 /* invalidate code */
3988 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3989 /* set dirty bit */
f7c11b53
YT
3990 cpu_physical_memory_set_dirty_flags(
3991 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3992 }
3993 }
8df1cd07
FB
3994 }
3995}
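stl_phys_notdirty() exists for page-table walkers that must update guest PTEs without the write being treated as self-modifying code; a hedged sketch with an invented PTE layout:

/* Illustrative only, not part of exec.c: set a hypothetical "accessed" bit
 * in a 32-bit PTE without dirtying the RAM page or invalidating TBs. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                      /* hypothetical accessed bit */
    stl_phys_notdirty(pte_addr, pte);
}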
3996
c227f099 3997void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3998{
bc98a7ef 3999 uint8_t *ptr;
f3705d53 4000 MemoryRegionSection *section;
bc98a7ef 4001
06ef3525 4002 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4003
f3705d53 4004 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4005 addr = memory_region_section_addr(section, addr);
f3705d53 4006 if (memory_region_is_ram(section->mr)) {
37ec01d4 4007 section = &phys_sections[phys_section_rom];
06ef3525 4008 }
bc98a7ef 4009#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4010 io_mem_write(section->mr, addr, val >> 32, 4);
4011 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 4012#else
37ec01d4
AK
4013 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4014 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
4015#endif
4016 } else {
f3705d53 4017 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4018 & TARGET_PAGE_MASK)
cc5bea60 4019 + memory_region_section_addr(section, addr));
bc98a7ef
JM
4020 stq_p(ptr, val);
4021 }
4022}
4023
8df1cd07 4024/* warning: addr must be aligned */
1e78bcc1
AG
4025static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4026 enum device_endian endian)
8df1cd07 4027{
8df1cd07 4028 uint8_t *ptr;
f3705d53 4029 MemoryRegionSection *section;
8df1cd07 4030
06ef3525 4031 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4032
f3705d53 4033 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4034 addr = memory_region_section_addr(section, addr);
f3705d53 4035 if (memory_region_is_ram(section->mr)) {
37ec01d4 4036 section = &phys_sections[phys_section_rom];
06ef3525 4037 }
1e78bcc1
AG
4038#if defined(TARGET_WORDS_BIGENDIAN)
4039 if (endian == DEVICE_LITTLE_ENDIAN) {
4040 val = bswap32(val);
4041 }
4042#else
4043 if (endian == DEVICE_BIG_ENDIAN) {
4044 val = bswap32(val);
4045 }
4046#endif
37ec01d4 4047 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4048 } else {
4049 unsigned long addr1;
f3705d53 4050 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4051 + memory_region_section_addr(section, addr);
8df1cd07 4052 /* RAM case */
5579c7f3 4053 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4054 switch (endian) {
4055 case DEVICE_LITTLE_ENDIAN:
4056 stl_le_p(ptr, val);
4057 break;
4058 case DEVICE_BIG_ENDIAN:
4059 stl_be_p(ptr, val);
4060 break;
4061 default:
4062 stl_p(ptr, val);
4063 break;
4064 }
3a7d929e
FB
4065 if (!cpu_physical_memory_is_dirty(addr1)) {
4066 /* invalidate code */
4067 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4068 /* set dirty bit */
f7c11b53
YT
4069 cpu_physical_memory_set_dirty_flags(addr1,
4070 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4071 }
8df1cd07
FB
4072 }
4073}
4074
1e78bcc1
AG
4075void stl_phys(target_phys_addr_t addr, uint32_t val)
4076{
4077 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4078}
4079
4080void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4081{
4082 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4083}
4084
4085void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4086{
4087 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4088}
4089
aab33094 4090/* XXX: optimize */
c227f099 4091void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4092{
4093 uint8_t v = val;
4094 cpu_physical_memory_write(addr, &v, 1);
4095}
4096
733f0b02 4097/* warning: addr must be aligned */
1e78bcc1
AG
4098static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4099 enum device_endian endian)
aab33094 4100{
733f0b02 4101 uint8_t *ptr;
f3705d53 4102 MemoryRegionSection *section;
733f0b02 4103
06ef3525 4104 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4105
f3705d53 4106 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4107 addr = memory_region_section_addr(section, addr);
f3705d53 4108 if (memory_region_is_ram(section->mr)) {
37ec01d4 4109 section = &phys_sections[phys_section_rom];
06ef3525 4110 }
1e78bcc1
AG
4111#if defined(TARGET_WORDS_BIGENDIAN)
4112 if (endian == DEVICE_LITTLE_ENDIAN) {
4113 val = bswap16(val);
4114 }
4115#else
4116 if (endian == DEVICE_BIG_ENDIAN) {
4117 val = bswap16(val);
4118 }
4119#endif
37ec01d4 4120 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4121 } else {
4122 unsigned long addr1;
f3705d53 4123 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4124 + memory_region_section_addr(section, addr);
733f0b02
MT
4125 /* RAM case */
4126 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4127 switch (endian) {
4128 case DEVICE_LITTLE_ENDIAN:
4129 stw_le_p(ptr, val);
4130 break;
4131 case DEVICE_BIG_ENDIAN:
4132 stw_be_p(ptr, val);
4133 break;
4134 default:
4135 stw_p(ptr, val);
4136 break;
4137 }
733f0b02
MT
4138 if (!cpu_physical_memory_is_dirty(addr1)) {
4139 /* invalidate code */
4140 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4141 /* set dirty bit */
4142 cpu_physical_memory_set_dirty_flags(addr1,
4143 (0xff & ~CODE_DIRTY_FLAG));
4144 }
4145 }
aab33094
FB
4146}
4147
1e78bcc1
AG
4148void stw_phys(target_phys_addr_t addr, uint32_t val)
4149{
4150 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4151}
4152
4153void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4154{
4155 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4156}
4157
4158void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4159{
4160 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4161}
4162
aab33094 4163/* XXX: optimize */
c227f099 4164void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4165{
4166 val = tswap64(val);
71d2b725 4167 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4168}
4169
1e78bcc1
AG
4170void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4171{
4172 val = cpu_to_le64(val);
4173 cpu_physical_memory_write(addr, &val, 8);
4174}
4175
4176void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4177{
4178 val = cpu_to_be64(val);
4179 cpu_physical_memory_write(addr, &val, 8);
4180}
4181
5e2972fd 4182/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4183int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4184 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4185{
4186 int l;
c227f099 4187 target_phys_addr_t phys_addr;
9b3c35e0 4188 target_ulong page;
13eb76e0
FB
4189
4190 while (len > 0) {
4191 page = addr & TARGET_PAGE_MASK;
4192 phys_addr = cpu_get_phys_page_debug(env, page);
4193 /* if no physical page mapped, return an error */
4194 if (phys_addr == -1)
4195 return -1;
4196 l = (page + TARGET_PAGE_SIZE) - addr;
4197 if (l > len)
4198 l = len;
5e2972fd 4199 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4200 if (is_write)
4201 cpu_physical_memory_write_rom(phys_addr, buf, l);
4202 else
5e2972fd 4203 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4204 len -= l;
4205 buf += l;
4206 addr += l;
4207 }
4208 return 0;
4209}
a68fe89c 4210#endif
13eb76e0 4211
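cpu_memory_rw_debug() is the accessor the gdb stub and monitor use for guest virtual addresses; writes take the ROM path so breakpoints can be planted even in read-only code. A hedged usage sketch:

/* Illustrative only, not part of exec.c: read a block of guest virtual
 * memory through the CPU's current page tables. */
static int example_debug_peek(CPUArchState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env, vaddr, out, len, 0) < 0) {
        return -1; /* some page in the range is unmapped */
    }
    return 0;
}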
2e70f6ef
PB
4212/* in deterministic execution mode, instructions performing device I/O
4213 must be at the end of the TB */
20503968 4214void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4215{
4216 TranslationBlock *tb;
4217 uint32_t n, cflags;
4218 target_ulong pc, cs_base;
4219 uint64_t flags;
4220
20503968 4221 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4222 if (!tb) {
4223 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4224 (void *)retaddr);
2e70f6ef
PB
4225 }
4226 n = env->icount_decr.u16.low + tb->icount;
20503968 4227 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4228 /* Calculate how many instructions had been executed before the fault
bf20dc07 4229 occurred. */
2e70f6ef
PB
4230 n = n - env->icount_decr.u16.low;
4231 /* Generate a new TB ending on the I/O insn. */
4232 n++;
4233 /* On MIPS and SH, delay slot instructions can only be restarted if
4234 they were already the first instruction in the TB. If this is not
bf20dc07 4235 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4236 branch. */
4237#if defined(TARGET_MIPS)
4238 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4239 env->active_tc.PC -= 4;
4240 env->icount_decr.u16.low++;
4241 env->hflags &= ~MIPS_HFLAG_BMASK;
4242 }
4243#elif defined(TARGET_SH4)
4244 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4245 && n > 1) {
4246 env->pc -= 2;
4247 env->icount_decr.u16.low++;
4248 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4249 }
4250#endif
4251 /* This should never happen. */
4252 if (n > CF_COUNT_MASK)
4253 cpu_abort(env, "TB too big during recompile");
4254
4255 cflags = n | CF_LAST_IO;
4256 pc = tb->pc;
4257 cs_base = tb->cs_base;
4258 flags = tb->flags;
4259 tb_phys_invalidate(tb, -1);
4260 /* FIXME: In theory this could raise an exception. In practice
4261 we have already translated the block once so it's probably ok. */
4262 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4263 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4264 the first in the TB) then we end up generating a whole new TB and
4265 repeating the fault, which is horribly inefficient.
4266 Better would be to execute just this insn uncached, or generate a
4267 second new TB. */
4268 cpu_resume_from_signal(env, NULL);
4269}
4270
b3755a91
PB
4271#if !defined(CONFIG_USER_ONLY)
4272
055403b2 4273void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4274{
4275 int i, target_code_size, max_target_code_size;
4276 int direct_jmp_count, direct_jmp2_count, cross_page;
4277 TranslationBlock *tb;
3b46e624 4278
e3db7226
FB
4279 target_code_size = 0;
4280 max_target_code_size = 0;
4281 cross_page = 0;
4282 direct_jmp_count = 0;
4283 direct_jmp2_count = 0;
4284 for(i = 0; i < nb_tbs; i++) {
4285 tb = &tbs[i];
4286 target_code_size += tb->size;
4287 if (tb->size > max_target_code_size)
4288 max_target_code_size = tb->size;
4289 if (tb->page_addr[1] != -1)
4290 cross_page++;
4291 if (tb->tb_next_offset[0] != 0xffff) {
4292 direct_jmp_count++;
4293 if (tb->tb_next_offset[1] != 0xffff) {
4294 direct_jmp2_count++;
4295 }
4296 }
4297 }
4298 /* XXX: avoid using doubles ? */
57fec1fe 4299 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4300 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4301 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4302 cpu_fprintf(f, "TB count %d/%d\n",
4303 nb_tbs, code_gen_max_blocks);
5fafdf24 4304 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4305 nb_tbs ? target_code_size / nb_tbs : 0,
4306 max_target_code_size);
055403b2 4307 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4308 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4309 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4310 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4311 cross_page,
e3db7226
FB
4312 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4313 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4314 direct_jmp_count,
e3db7226
FB
4315 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4316 direct_jmp2_count,
4317 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4318 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4319 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4320 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4321 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4322 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4323}
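dump_exec_info() takes an fprintf-style callback so the same routine can serve the monitor or a plain stdio stream. A hedged sketch, assuming fprintf itself is compatible with the fprintf_function typedef (QEMU uses the same trick elsewhere):

/* Illustrative only, not part of exec.c: dump TB and TCG statistics to
 * stderr. The monitor passes its own stream and callback instead. */
static void example_dump_exec_info_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}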
4324
82afa586
BH
4325/*
4326 * A helper function for the _utterly broken_ virtio device model to find out if
4327 * it's running on a big endian machine. Don't do this at home kids!
4328 */
4329bool virtio_is_big_endian(void);
4330bool virtio_is_big_endian(void)
4331{
4332#if defined(TARGET_WORDS_BIGENDIAN)
4333 return true;
4334#else
4335 return false;
4336#endif
4337}
4338
61382a50 4339#endif