/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

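/* Determine the host/target page size relationship and, for BSD user-mode
   builds, mark pages already claimed by host mappings as PAGE_RESERVED. */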
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

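/* Walk the multi-level l1_map for a page index, optionally allocating the
   intermediate tables, and return the corresponding PageDesc slot. */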
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

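/* Recursively populate the physical page radix tree: leaves covering a full,
   aligned step at this level are set directly, anything else descends one
   level further. */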
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

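/* Walk the physical page radix tree and return the MemoryRegionSection for
   the given page index; unmapped pages resolve to phys_section_unassigned. */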
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

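/* Allocate the translated-code buffer, either statically or via mmap() with
   per-host placement constraints so generated branches stay in range. */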
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

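/* Register a new CPU: give it the next cpu_index, append it to the global
   CPU list and, for system emulation, hook up its savevm/vmstate handlers. */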
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

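/* Remove a TB from the physical hash table, from the page lists of the pages
   it covers and from every CPU's tb_jmp_cache, then unchain all jumps into
   and out of it. */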
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

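/* Translate the guest code at 'pc' into a new TB and link it into the
   physical page tables, flushing the whole code cache first if the TB pool
   or the code buffer is exhausted. */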
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

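/* Unchain the TB the CPU is currently executing so that control returns to
   the main loop as soon as possible. */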
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

97ffbd8d 1716#ifndef CONFIG_USER_ONLY
3098dba0 1717/* mask must never be zero, except for A20 change call */
9349b4f9 1718static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1719{
1720 int old_mask;
be214e6c 1721
2e70f6ef 1722 old_mask = env->interrupt_request;
68a79315 1723 env->interrupt_request |= mask;
3098dba0 1724
8edac960
AL
1725 /*
1726 * If called from iothread context, wake the target cpu in
1727 * case it's halted.
1728 */
b7680cb6 1729 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1730 qemu_cpu_kick(env);
1731 return;
1732 }
8edac960 1733
2e70f6ef 1734 if (use_icount) {
266910c4 1735 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1736 if (!can_do_io(env)
be214e6c 1737 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1738 cpu_abort(env, "Raised interrupt while not in I/O function");
1739 }
2e70f6ef 1740 } else {
3098dba0 1741 cpu_unlink_tb(env);
ea041c0e
FB
1742 }
1743}
1744
ec6959d0
JK
1745CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1746
97ffbd8d
JK
1747#else /* CONFIG_USER_ONLY */
1748
9349b4f9 1749void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1750{
1751 env->interrupt_request |= mask;
1752 cpu_unlink_tb(env);
1753}
1754#endif /* CONFIG_USER_ONLY */
1755
9349b4f9 1756void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1757{
1758 env->interrupt_request &= ~mask;
1759}
1760
9349b4f9 1761void cpu_exit(CPUArchState *env)
3098dba0
AJ
1762{
1763 env->exit_request = 1;
1764 cpu_unlink_tb(env);
1765}
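
/* Illustrative sketch, not part of exec.c: how an interrupt-controller model
   typically drives cpu_interrupt()/cpu_reset_interrupt(). CPU_INTERRUPT_HARD
   is the usual external-interrupt bit; the exact flow is target specific. */
static void example_set_irq_level(CPUArchState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);        /* raise the line */
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);  /* drop the line */
    }
}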
1766
c7cd6a37 1767const CPULogItem cpu_log_items[] = {
5fafdf24 1768 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1769 "show generated host assembly code for each compiled TB" },
1770 { CPU_LOG_TB_IN_ASM, "in_asm",
1771 "show target assembly code for each compiled TB" },
5fafdf24 1772 { CPU_LOG_TB_OP, "op",
57fec1fe 1773 "show micro ops for each compiled TB" },
f193c797 1774 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1775 "show micro ops "
1776#ifdef TARGET_I386
1777 "before eflags optimization and "
f193c797 1778#endif
e01a1157 1779 "after liveness analysis" },
f193c797
FB
1780 { CPU_LOG_INT, "int",
1781 "show interrupts/exceptions in short format" },
1782 { CPU_LOG_EXEC, "exec",
1783 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1784 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1785 "show CPU state before block translation" },
f193c797
FB
1786#ifdef TARGET_I386
1787 { CPU_LOG_PCALL, "pcall",
1788 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1789 { CPU_LOG_RESET, "cpu_reset",
1790 "show CPU state before CPU resets" },
f193c797 1791#endif
8e3a9fd2 1792#ifdef DEBUG_IOPORT
fd872598
FB
1793 { CPU_LOG_IOPORT, "ioport",
1794 "show all i/o ports accesses" },
8e3a9fd2 1795#endif
f193c797
FB
1796 { 0, NULL, NULL },
1797};
1798
1799static int cmp1(const char *s1, int n, const char *s2)
1800{
1801 if (strlen(s2) != n)
1802 return 0;
1803 return memcmp(s1, s2, n) == 0;
1804}
3b46e624 1805
f193c797
FB
1806/* takes a comma-separated list of log masks. Returns 0 on error. */
1807int cpu_str_to_log_mask(const char *str)
1808{
c7cd6a37 1809 const CPULogItem *item;
f193c797
FB
1810 int mask;
1811 const char *p, *p1;
1812
1813 p = str;
1814 mask = 0;
1815 for(;;) {
1816 p1 = strchr(p, ',');
1817 if (!p1)
1818 p1 = p + strlen(p);
9742bf26
YT
1819 if(cmp1(p,p1-p,"all")) {
1820 for(item = cpu_log_items; item->mask != 0; item++) {
1821 mask |= item->mask;
1822 }
1823 } else {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 if (cmp1(p, p1 - p, item->name))
1826 goto found;
1827 }
1828 return 0;
f193c797 1829 }
f193c797
FB
1830 found:
1831 mask |= item->mask;
1832 if (*p1 != ',')
1833 break;
1834 p = p1 + 1;
1835 }
1836 return mask;
1837}
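
/* Illustrative sketch, not part of exec.c: how the two logging helpers above
   are combined when handling a "-d" style option string such as "in_asm,op". */
static void example_enable_logging(const char *items)
{
    int mask = cpu_str_to_log_mask(items);
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", items);
        return;
    }
    cpu_set_log(mask);    /* opens the log file on first use */
}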
ea041c0e 1838
9349b4f9 1839void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1840{
1841 va_list ap;
493ae1f0 1842 va_list ap2;
7501267e
FB
1843
1844 va_start(ap, fmt);
493ae1f0 1845 va_copy(ap2, ap);
7501267e
FB
1846 fprintf(stderr, "qemu: fatal: ");
1847 vfprintf(stderr, fmt, ap);
1848 fprintf(stderr, "\n");
1849#ifdef TARGET_I386
7fe48483
FB
1850 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1851#else
1852 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1853#endif
93fcfe39
AL
1854 if (qemu_log_enabled()) {
1855 qemu_log("qemu: fatal: ");
1856 qemu_log_vprintf(fmt, ap2);
1857 qemu_log("\n");
f9373291 1858#ifdef TARGET_I386
93fcfe39 1859 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1860#else
93fcfe39 1861 log_cpu_state(env, 0);
f9373291 1862#endif
31b1a7b4 1863 qemu_log_flush();
93fcfe39 1864 qemu_log_close();
924edcae 1865 }
493ae1f0 1866 va_end(ap2);
f9373291 1867 va_end(ap);
fd052bf6
RV
1868#if defined(CONFIG_USER_ONLY)
1869 {
1870 struct sigaction act;
1871 sigfillset(&act.sa_mask);
1872 act.sa_handler = SIG_DFL;
1873 sigaction(SIGABRT, &act, NULL);
1874 }
1875#endif
7501267e
FB
1876 abort();
1877}
1878
9349b4f9 1879CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1880{
9349b4f9
AF
1881 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1882 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1883 int cpu_index = new_env->cpu_index;
5a38f081
AL
1884#if defined(TARGET_HAS_ICE)
1885 CPUBreakpoint *bp;
1886 CPUWatchpoint *wp;
1887#endif
1888
9349b4f9 1889 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1890
1891 /* Preserve chaining and index. */
c5be9f08
TS
1892 new_env->next_cpu = next_cpu;
1893 new_env->cpu_index = cpu_index;
5a38f081
AL
1894
1895 /* Clone all break/watchpoints.
1896 Note: Once we support ptrace with hw-debug register access, make sure
1897 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1898 QTAILQ_INIT(&env->breakpoints);
1899 QTAILQ_INIT(&env->watchpoints);
5a38f081 1900#if defined(TARGET_HAS_ICE)
72cf2d4f 1901 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1902 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1903 }
72cf2d4f 1904 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1905 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1906 wp->flags, NULL);
1907 }
1908#endif
1909
c5be9f08
TS
1910 return new_env;
1911}
1912
0124311e 1913#if !defined(CONFIG_USER_ONLY)
0cac1b66 1914void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1915{
1916 unsigned int i;
1917
1918 /* Discard jump cache entries for any tb which might potentially
1919 overlap the flushed page. */
1920 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1921 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1922 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1923
1924 i = tb_jmp_cache_hash_page(addr);
1925 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1926 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1927}
1928
5579c7f3 1929/* Note: start and end must be within the same ram block. */
c227f099 1930void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1931 int dirty_flags)
1ccde1cb 1932{
8efe0ca8 1933 uintptr_t length, start1;
1ccde1cb
FB
1934
1935 start &= TARGET_PAGE_MASK;
1936 end = TARGET_PAGE_ALIGN(end);
1937
1938 length = end - start;
1939 if (length == 0)
1940 return;
f7c11b53 1941 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1942
1ccde1cb
FB
1943 /* we modify the TLB cache so that the dirty bit will be set again
1944 when accessing the range */
8efe0ca8 1945 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
a57d23e4 1946 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1947 address comparisons below. */
8efe0ca8 1948 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1949 != (end - 1) - start) {
1950 abort();
1951 }
e5548617 1952 cpu_tlb_reset_dirty_all(start1, length);
1ccde1cb
FB
1953}
1954
74576198
AL
1955int cpu_physical_memory_set_dirty_tracking(int enable)
1956{
f6f3fbca 1957 int ret = 0;
74576198 1958 in_migration = enable;
f6f3fbca 1959 return ret;
74576198
AL
1960}
1961
e5548617
BS
1962target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1963 MemoryRegionSection *section,
1964 target_ulong vaddr,
1965 target_phys_addr_t paddr,
1966 int prot,
1967 target_ulong *address)
1968{
1969 target_phys_addr_t iotlb;
1970 CPUWatchpoint *wp;
1971
cc5bea60 1972 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1973 /* Normal RAM. */
1974 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1975 + memory_region_section_addr(section, paddr);
e5548617
BS
1976 if (!section->readonly) {
1977 iotlb |= phys_section_notdirty;
1978 } else {
1979 iotlb |= phys_section_rom;
1980 }
1981 } else {
1982 /* IO handlers are currently passed a physical address.
1983 It would be nice to pass an offset from the base address
1984 of that region. This would avoid having to special case RAM,
1985 and avoid full address decoding in every device.
1986 We can't use the high bits of pd for this because
1987 IO_MEM_ROMD uses these as a ram address. */
1988 iotlb = section - phys_sections;
cc5bea60 1989 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
1990 }
1991
1992 /* Make accesses to pages with watchpoints go via the
1993 watchpoint trap routines. */
1994 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1995 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1996 /* Avoid trapping reads of pages with a write breakpoint. */
1997 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1998 iotlb = phys_section_watch + paddr;
1999 *address |= TLB_MMIO;
2000 break;
2001 }
2002 }
2003 }
2004
2005 return iotlb;
2006}
2007
0124311e 2008#else
edf8e2af
MW
2009/*
2010 * Walks guest process memory "regions" one by one
2011 * and calls callback function 'fn' for each region.
2012 */
5cd2c5b6
RH
2013
2014struct walk_memory_regions_data
2015{
2016 walk_memory_regions_fn fn;
2017 void *priv;
8efe0ca8 2018 uintptr_t start;
5cd2c5b6
RH
2019 int prot;
2020};
2021
2022static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2023 abi_ulong end, int new_prot)
5cd2c5b6
RH
2024{
2025 if (data->start != -1ul) {
2026 int rc = data->fn(data->priv, data->start, end, data->prot);
2027 if (rc != 0) {
2028 return rc;
2029 }
2030 }
2031
2032 data->start = (new_prot ? end : -1ul);
2033 data->prot = new_prot;
2034
2035 return 0;
2036}
2037
2038static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2039 abi_ulong base, int level, void **lp)
5cd2c5b6 2040{
b480d9b7 2041 abi_ulong pa;
5cd2c5b6
RH
2042 int i, rc;
2043
2044 if (*lp == NULL) {
2045 return walk_memory_regions_end(data, base, 0);
2046 }
2047
2048 if (level == 0) {
2049 PageDesc *pd = *lp;
7296abac 2050 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2051 int prot = pd[i].flags;
2052
2053 pa = base | (i << TARGET_PAGE_BITS);
2054 if (prot != data->prot) {
2055 rc = walk_memory_regions_end(data, pa, prot);
2056 if (rc != 0) {
2057 return rc;
9fa3e853 2058 }
9fa3e853 2059 }
5cd2c5b6
RH
2060 }
2061 } else {
2062 void **pp = *lp;
7296abac 2063 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2064 pa = base | ((abi_ulong)i <<
2065 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2066 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2067 if (rc != 0) {
2068 return rc;
2069 }
2070 }
2071 }
2072
2073 return 0;
2074}
2075
2076int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2077{
2078 struct walk_memory_regions_data data;
8efe0ca8 2079 uintptr_t i;
5cd2c5b6
RH
2080
2081 data.fn = fn;
2082 data.priv = priv;
2083 data.start = -1ul;
2084 data.prot = 0;
2085
2086 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2087 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2088 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2089 if (rc != 0) {
2090 return rc;
9fa3e853 2091 }
33417e70 2092 }
5cd2c5b6
RH
2093
2094 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2095}
2096
b480d9b7
PB
2097static int dump_region(void *priv, abi_ulong start,
2098 abi_ulong end, unsigned long prot)
edf8e2af
MW
2099{
2100 FILE *f = (FILE *)priv;
2101
b480d9b7
PB
2102 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2103 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2104 start, end, end - start,
2105 ((prot & PAGE_READ) ? 'r' : '-'),
2106 ((prot & PAGE_WRITE) ? 'w' : '-'),
2107 ((prot & PAGE_EXEC) ? 'x' : '-'));
2108
2109 return (0);
2110}
2111
2112/* dump memory mappings */
2113void page_dump(FILE *f)
2114{
2115 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2116 "start", "end", "size", "prot");
2117 walk_memory_regions(f, dump_region);
33417e70
FB
2118}
2119
53a5960a 2120int page_get_flags(target_ulong address)
33417e70 2121{
9fa3e853
FB
2122 PageDesc *p;
2123
2124 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2125 if (!p)
9fa3e853
FB
2126 return 0;
2127 return p->flags;
2128}
2129
376a7909
RH
2130/* Modify the flags of a page and invalidate the code if necessary.
2131 The flag PAGE_WRITE_ORG is positioned automatically depending
2132 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2133void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2134{
376a7909
RH
2135 target_ulong addr, len;
2136
2137 /* This function should never be called with addresses outside the
2138 guest address space. If this assert fires, it probably indicates
2139 a missing call to h2g_valid. */
b480d9b7
PB
2140#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2141 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2142#endif
2143 assert(start < end);
9fa3e853
FB
2144
2145 start = start & TARGET_PAGE_MASK;
2146 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2147
2148 if (flags & PAGE_WRITE) {
9fa3e853 2149 flags |= PAGE_WRITE_ORG;
376a7909
RH
2150 }
2151
2152 for (addr = start, len = end - start;
2153 len != 0;
2154 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2155 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2156
2157 /* If the write protection bit is set, then we invalidate
2158 the code inside. */
5fafdf24 2159 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2160 (flags & PAGE_WRITE) &&
2161 p->first_tb) {
d720b93d 2162 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2163 }
2164 p->flags = flags;
2165 }
33417e70
FB
2166}
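
/* Illustrative sketch, not part of exec.c: the user-mode mmap emulation marks
   a freshly created mapping as valid in roughly this way (simplified from
   linux-user/mmap.c; 'prot' carries the PAGE_READ/PAGE_WRITE/PAGE_EXEC bits
   derived from the guest's mmap arguments). */
static void example_mark_new_mapping(target_ulong start, target_ulong len,
                                     int prot)
{
    page_set_flags(start, start + len, prot | PAGE_VALID);
}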
2167
3d97b40b
TS
2168int page_check_range(target_ulong start, target_ulong len, int flags)
2169{
2170 PageDesc *p;
2171 target_ulong end;
2172 target_ulong addr;
2173
376a7909
RH
2174 /* This function should never be called with addresses outside the
2175 guest address space. If this assert fires, it probably indicates
2176 a missing call to h2g_valid. */
338e9e6c
BS
2177#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2178 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2179#endif
2180
3e0650a9
RH
2181 if (len == 0) {
2182 return 0;
2183 }
376a7909
RH
2184 if (start + len - 1 < start) {
2185 /* We've wrapped around. */
55f280c9 2186 return -1;
376a7909 2187 }
55f280c9 2188
3d97b40b
TS
2189 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2190 start = start & TARGET_PAGE_MASK;
2191
376a7909
RH
2192 for (addr = start, len = end - start;
2193 len != 0;
2194 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2195 p = page_find(addr >> TARGET_PAGE_BITS);
2196 if( !p )
2197 return -1;
2198 if( !(p->flags & PAGE_VALID) )
2199 return -1;
2200
dae3270c 2201 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2202 return -1;
dae3270c
FB
2203 if (flags & PAGE_WRITE) {
2204 if (!(p->flags & PAGE_WRITE_ORG))
2205 return -1;
2206 /* unprotect the page if it was put read-only because it
2207 contains translated code */
2208 if (!(p->flags & PAGE_WRITE)) {
2209 if (!page_unprotect(addr, 0, NULL))
2210 return -1;
2211 }
2212 return 0;
2213 }
3d97b40b
TS
2214 }
2215 return 0;
2216}
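
/* Illustrative sketch, hypothetical helper not in this file: validating a
   guest buffer with page_check_range() before copying from it in user-mode
   emulation. g2h() converts a guest virtual address to a host pointer. */
static int example_copy_from_guest(void *dst, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
        return -1;    /* caller typically turns this into -EFAULT */
    }
    memcpy(dst, g2h(guest_addr), len);
    return 0;
}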
2217
9fa3e853 2218/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2219 page. Return TRUE if the fault was successfully handled. */
6375e09e 2220int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2221{
45d679d6
AJ
2222 unsigned int prot;
2223 PageDesc *p;
53a5960a 2224 target_ulong host_start, host_end, addr;
9fa3e853 2225
c8a706fe
PB
2226 /* Technically this isn't safe inside a signal handler. However we
2227 know this only ever happens in a synchronous SEGV handler, so in
2228 practice it seems to be ok. */
2229 mmap_lock();
2230
45d679d6
AJ
2231 p = page_find(address >> TARGET_PAGE_BITS);
2232 if (!p) {
c8a706fe 2233 mmap_unlock();
9fa3e853 2234 return 0;
c8a706fe 2235 }
45d679d6 2236
9fa3e853
FB
2237 /* if the page was really writable, then we change its
2238 protection back to writable */
45d679d6
AJ
2239 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2240 host_start = address & qemu_host_page_mask;
2241 host_end = host_start + qemu_host_page_size;
2242
2243 prot = 0;
2244 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2245 p = page_find(addr >> TARGET_PAGE_BITS);
2246 p->flags |= PAGE_WRITE;
2247 prot |= p->flags;
2248
9fa3e853
FB
2249 /* and since the content will be modified, we must invalidate
2250 the corresponding translated code. */
45d679d6 2251 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2252#ifdef DEBUG_TB_CHECK
45d679d6 2253 tb_invalidate_check(addr);
9fa3e853 2254#endif
9fa3e853 2255 }
45d679d6
AJ
2256 mprotect((void *)g2h(host_start), qemu_host_page_size,
2257 prot & PAGE_BITS);
2258
2259 mmap_unlock();
2260 return 1;
9fa3e853 2261 }
c8a706fe 2262 mmap_unlock();
9fa3e853
FB
2263 return 0;
2264}
9fa3e853
FB
2265#endif /* defined(CONFIG_USER_ONLY) */
2266
e2eef170 2267#if !defined(CONFIG_USER_ONLY)
8da3ff18 2268
c04b2b78
PB
2269#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2270typedef struct subpage_t {
70c68e44 2271 MemoryRegion iomem;
c04b2b78 2272 target_phys_addr_t base;
5312bd8b 2273 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2274} subpage_t;
2275
c227f099 2276static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2277 uint16_t section);
0f0cb164 2278static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2279static void destroy_page_desc(uint16_t section_index)
54688b1e 2280{
5312bd8b
AK
2281 MemoryRegionSection *section = &phys_sections[section_index];
2282 MemoryRegion *mr = section->mr;
54688b1e
AK
2283
2284 if (mr->subpage) {
2285 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2286 memory_region_destroy(&subpage->iomem);
2287 g_free(subpage);
2288 }
2289}
2290
4346ae3e 2291static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2292{
2293 unsigned i;
d6f2ea22 2294 PhysPageEntry *p;
54688b1e 2295
c19e8800 2296 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2297 return;
2298 }
2299
c19e8800 2300 p = phys_map_nodes[lp->ptr];
4346ae3e 2301 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2302 if (!p[i].is_leaf) {
54688b1e 2303 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2304 } else {
c19e8800 2305 destroy_page_desc(p[i].ptr);
54688b1e 2306 }
54688b1e 2307 }
07f07b31 2308 lp->is_leaf = 0;
c19e8800 2309 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2310}
2311
2312static void destroy_all_mappings(void)
2313{
3eef53df 2314 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2315 phys_map_nodes_reset();
54688b1e
AK
2316}
2317
5312bd8b
AK
2318static uint16_t phys_section_add(MemoryRegionSection *section)
2319{
2320 if (phys_sections_nb == phys_sections_nb_alloc) {
2321 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2322 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2323 phys_sections_nb_alloc);
2324 }
2325 phys_sections[phys_sections_nb] = *section;
2326 return phys_sections_nb++;
2327}
2328
2329static void phys_sections_clear(void)
2330{
2331 phys_sections_nb = 0;
2332}
2333
8f2498f9
MT
2334/* register physical memory.
2335 For RAM, 'size' must be a multiple of the target page size.
2336 Sections that do not start or end on a target page boundary are
8da3ff18
PB
2337 registered through subpages, and the address passed to a region's IO
2338 callbacks is then the offset from the start of that region. The
ccbb4d44 2339 page-aligned middle of a section is registered directly. Both cases
8da3ff18
PB
2340 are handled by cpu_register_physical_memory_log() below, which splits
2341 an unaligned section into head, middle and tail. */
0f0cb164
AK
2342static void register_subpage(MemoryRegionSection *section)
2343{
2344 subpage_t *subpage;
2345 target_phys_addr_t base = section->offset_within_address_space
2346 & TARGET_PAGE_MASK;
f3705d53 2347 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2348 MemoryRegionSection subsection = {
2349 .offset_within_address_space = base,
2350 .size = TARGET_PAGE_SIZE,
2351 };
0f0cb164
AK
2352 target_phys_addr_t start, end;
2353
f3705d53 2354 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2355
f3705d53 2356 if (!(existing->mr->subpage)) {
0f0cb164
AK
2357 subpage = subpage_init(base);
2358 subsection.mr = &subpage->iomem;
2999097b
AK
2359 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2360 phys_section_add(&subsection));
0f0cb164 2361 } else {
f3705d53 2362 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2363 }
2364 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2365 end = start + section->size;
2366 subpage_register(subpage, start, end, phys_section_add(section));
2367}
2368
2369
2370static void register_multipage(MemoryRegionSection *section)
33417e70 2371{
dd81124b
AK
2372 target_phys_addr_t start_addr = section->offset_within_address_space;
2373 ram_addr_t size = section->size;
2999097b 2374 target_phys_addr_t addr;
5312bd8b 2375 uint16_t section_index = phys_section_add(section);
dd81124b 2376
3b8e6a2d 2377 assert(size);
f6f3fbca 2378
3b8e6a2d 2379 addr = start_addr;
2999097b
AK
2380 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2381 section_index);
33417e70
FB
2382}
2383
0f0cb164
AK
2384void cpu_register_physical_memory_log(MemoryRegionSection *section,
2385 bool readonly)
2386{
2387 MemoryRegionSection now = *section, remain = *section;
2388
2389 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2390 || (now.size < TARGET_PAGE_SIZE)) {
2391 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2392 - now.offset_within_address_space,
2393 now.size);
2394 register_subpage(&now);
2395 remain.size -= now.size;
2396 remain.offset_within_address_space += now.size;
2397 remain.offset_within_region += now.size;
2398 }
2399 now = remain;
2400 now.size &= TARGET_PAGE_MASK;
2401 if (now.size) {
2402 register_multipage(&now);
2403 remain.size -= now.size;
2404 remain.offset_within_address_space += now.size;
2405 remain.offset_within_region += now.size;
2406 }
2407 now = remain;
2408 if (now.size) {
2409 register_subpage(&now);
2410 }
2411}
2412
2413
c227f099 2414void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2415{
2416 if (kvm_enabled())
2417 kvm_coalesce_mmio_region(addr, size);
2418}
2419
c227f099 2420void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2421{
2422 if (kvm_enabled())
2423 kvm_uncoalesce_mmio_region(addr, size);
2424}
2425
62a2744c
SY
2426void qemu_flush_coalesced_mmio_buffer(void)
2427{
2428 if (kvm_enabled())
2429 kvm_flush_coalesced_mmio_buffer();
2430}
2431
c902760f
MT
2432#if defined(__linux__) && !defined(TARGET_S390X)
2433
2434#include <sys/vfs.h>
2435
2436#define HUGETLBFS_MAGIC 0x958458f6
2437
2438static long gethugepagesize(const char *path)
2439{
2440 struct statfs fs;
2441 int ret;
2442
2443 do {
9742bf26 2444 ret = statfs(path, &fs);
c902760f
MT
2445 } while (ret != 0 && errno == EINTR);
2446
2447 if (ret != 0) {
9742bf26
YT
2448 perror(path);
2449 return 0;
c902760f
MT
2450 }
2451
2452 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2453 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2454
2455 return fs.f_bsize;
2456}
2457
04b16653
AW
2458static void *file_ram_alloc(RAMBlock *block,
2459 ram_addr_t memory,
2460 const char *path)
c902760f
MT
2461{
2462 char *filename;
2463 void *area;
2464 int fd;
2465#ifdef MAP_POPULATE
2466 int flags;
2467#endif
2468 unsigned long hpagesize;
2469
2470 hpagesize = gethugepagesize(path);
2471 if (!hpagesize) {
9742bf26 2472 return NULL;
c902760f
MT
2473 }
2474
2475 if (memory < hpagesize) {
2476 return NULL;
2477 }
2478
2479 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2480 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2481 return NULL;
2482 }
2483
2484 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2485 return NULL;
c902760f
MT
2486 }
2487
2488 fd = mkstemp(filename);
2489 if (fd < 0) {
9742bf26
YT
2490 perror("unable to create backing store for hugepages");
2491 free(filename);
2492 return NULL;
c902760f
MT
2493 }
2494 unlink(filename);
2495 free(filename);
2496
2497 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2498
2499 /*
2500 * ftruncate is not supported by hugetlbfs in older
2501 * hosts, so don't bother bailing out on errors.
2502 * If anything goes wrong with it under other filesystems,
2503 * mmap will fail.
2504 */
2505 if (ftruncate(fd, memory))
9742bf26 2506 perror("ftruncate");
c902760f
MT
2507
2508#ifdef MAP_POPULATE
2509 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2510 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2511 * to sidestep this quirk.
2512 */
2513 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2514 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2515#else
2516 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2517#endif
2518 if (area == MAP_FAILED) {
9742bf26
YT
2519 perror("file_ram_alloc: can't mmap RAM pages");
2520 close(fd);
2521 return (NULL);
c902760f 2522 }
04b16653 2523 block->fd = fd;
c902760f
MT
2524 return area;
2525}
2526#endif
2527
d17b5288 2528static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2529{
2530 RAMBlock *block, *next_block;
3e837b2c 2531 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2532
2533 if (QLIST_EMPTY(&ram_list.blocks))
2534 return 0;
2535
2536 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2537 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2538
2539 end = block->offset + block->length;
2540
2541 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2542 if (next_block->offset >= end) {
2543 next = MIN(next, next_block->offset);
2544 }
2545 }
2546 if (next - end >= size && next - end < mingap) {
3e837b2c 2547 offset = end;
04b16653
AW
2548 mingap = next - end;
2549 }
2550 }
3e837b2c
AW
2551
2552 if (offset == RAM_ADDR_MAX) {
2553 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2554 (uint64_t)size);
2555 abort();
2556 }
2557
04b16653
AW
2558 return offset;
2559}
2560
2561static ram_addr_t last_ram_offset(void)
d17b5288
AW
2562{
2563 RAMBlock *block;
2564 ram_addr_t last = 0;
2565
2566 QLIST_FOREACH(block, &ram_list.blocks, next)
2567 last = MAX(last, block->offset + block->length);
2568
2569 return last;
2570}
2571
c5705a77 2572void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2573{
2574 RAMBlock *new_block, *block;
2575
c5705a77
AK
2576 new_block = NULL;
2577 QLIST_FOREACH(block, &ram_list.blocks, next) {
2578 if (block->offset == addr) {
2579 new_block = block;
2580 break;
2581 }
2582 }
2583 assert(new_block);
2584 assert(!new_block->idstr[0]);
84b89d78
CM
2585
2586 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2587 char *id = dev->parent_bus->info->get_dev_path(dev);
2588 if (id) {
2589 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2590 g_free(id);
84b89d78
CM
2591 }
2592 }
2593 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2594
2595 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2596 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2597 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2598 new_block->idstr);
2599 abort();
2600 }
2601 }
c5705a77
AK
2602}
2603
2604ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2605 MemoryRegion *mr)
2606{
2607 RAMBlock *new_block;
2608
2609 size = TARGET_PAGE_ALIGN(size);
2610 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2611
7c637366 2612 new_block->mr = mr;
432d268c 2613 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2614 if (host) {
2615 new_block->host = host;
cd19cfa2 2616 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2617 } else {
2618 if (mem_path) {
c902760f 2619#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2620 new_block->host = file_ram_alloc(new_block, size, mem_path);
2621 if (!new_block->host) {
2622 new_block->host = qemu_vmalloc(size);
e78815a5 2623 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2624 }
c902760f 2625#else
6977dfe6
YT
2626 fprintf(stderr, "-mem-path option unsupported\n");
2627 exit(1);
c902760f 2628#endif
6977dfe6 2629 } else {
6b02494d 2630#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2631 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2632 a system-defined value, which is at least 256GB. Larger systems
2633 have larger values. We put the guest between the end of data
2634 segment (system break) and this value. We use 32GB as a base to
2635 have enough room for the system break to grow. */
2636 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2637 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2638 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2639 if (new_block->host == MAP_FAILED) {
2640 fprintf(stderr, "Allocating RAM failed\n");
2641 abort();
2642 }
6b02494d 2643#else
868bb33f 2644 if (xen_enabled()) {
fce537d4 2645 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2646 } else {
2647 new_block->host = qemu_vmalloc(size);
2648 }
6b02494d 2649#endif
e78815a5 2650 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2651 }
c902760f 2652 }
94a6b54f
PB
2653 new_block->length = size;
2654
f471a17e 2655 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2656
7267c094 2657 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2658 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2659 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2660 0xff, size >> TARGET_PAGE_BITS);
2661
6f0437e8
JK
2662 if (kvm_enabled())
2663 kvm_setup_guest_memory(new_block->host, size);
2664
94a6b54f
PB
2665 return new_block->offset;
2666}
e9a1ab19 2667
c5705a77 2668ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2669{
c5705a77 2670 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2671}
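
/* Illustrative sketch, not part of exec.c: the usual caller of
   qemu_ram_alloc() is the memory API, which records the returned offset in
   the MemoryRegion (simplified from memory_region_init_ram() in memory.c). */
static void example_init_ram_region(MemoryRegion *mr, const char *name,
                                    uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ram_addr = qemu_ram_alloc(size, mr);    /* remember the RAM offset */
}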
2672
1f2e98b6
AW
2673void qemu_ram_free_from_ptr(ram_addr_t addr)
2674{
2675 RAMBlock *block;
2676
2677 QLIST_FOREACH(block, &ram_list.blocks, next) {
2678 if (addr == block->offset) {
2679 QLIST_REMOVE(block, next);
7267c094 2680 g_free(block);
1f2e98b6
AW
2681 return;
2682 }
2683 }
2684}
2685
c227f099 2686void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2687{
04b16653
AW
2688 RAMBlock *block;
2689
2690 QLIST_FOREACH(block, &ram_list.blocks, next) {
2691 if (addr == block->offset) {
2692 QLIST_REMOVE(block, next);
cd19cfa2
HY
2693 if (block->flags & RAM_PREALLOC_MASK) {
2694 ;
2695 } else if (mem_path) {
04b16653
AW
2696#if defined (__linux__) && !defined(TARGET_S390X)
2697 if (block->fd) {
2698 munmap(block->host, block->length);
2699 close(block->fd);
2700 } else {
2701 qemu_vfree(block->host);
2702 }
fd28aa13
JK
2703#else
2704 abort();
04b16653
AW
2705#endif
2706 } else {
2707#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2708 munmap(block->host, block->length);
2709#else
868bb33f 2710 if (xen_enabled()) {
e41d7c69 2711 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2712 } else {
2713 qemu_vfree(block->host);
2714 }
04b16653
AW
2715#endif
2716 }
7267c094 2717 g_free(block);
04b16653
AW
2718 return;
2719 }
2720 }
2721
e9a1ab19
FB
2722}
2723
cd19cfa2
HY
2724#ifndef _WIN32
2725void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2726{
2727 RAMBlock *block;
2728 ram_addr_t offset;
2729 int flags;
2730 void *area, *vaddr;
2731
2732 QLIST_FOREACH(block, &ram_list.blocks, next) {
2733 offset = addr - block->offset;
2734 if (offset < block->length) {
2735 vaddr = block->host + offset;
2736 if (block->flags & RAM_PREALLOC_MASK) {
2737 ;
2738 } else {
2739 flags = MAP_FIXED;
2740 munmap(vaddr, length);
2741 if (mem_path) {
2742#if defined(__linux__) && !defined(TARGET_S390X)
2743 if (block->fd) {
2744#ifdef MAP_POPULATE
2745 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2746 MAP_PRIVATE;
2747#else
2748 flags |= MAP_PRIVATE;
2749#endif
2750 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2751 flags, block->fd, offset);
2752 } else {
2753 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2754 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2755 flags, -1, 0);
2756 }
fd28aa13
JK
2757#else
2758 abort();
cd19cfa2
HY
2759#endif
2760 } else {
2761#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2762 flags |= MAP_SHARED | MAP_ANONYMOUS;
2763 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2764 flags, -1, 0);
2765#else
2766 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2767 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2768 flags, -1, 0);
2769#endif
2770 }
2771 if (area != vaddr) {
f15fbc4b
AP
2772 fprintf(stderr, "Could not remap addr: "
2773 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2774 length, addr);
2775 exit(1);
2776 }
2777 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2778 }
2779 return;
2780 }
2781 }
2782}
2783#endif /* !_WIN32 */
2784
dc828ca1 2785/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2786 With the exception of the softmmu code in this file, this should
2787 only be used for local memory (e.g. video ram) that the device owns,
2788 and knows it isn't going to access beyond the end of the block.
2789
2790 It should not be used for general purpose DMA.
2791 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2792 */
c227f099 2793void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2794{
94a6b54f
PB
2795 RAMBlock *block;
2796
f471a17e
AW
2797 QLIST_FOREACH(block, &ram_list.blocks, next) {
2798 if (addr - block->offset < block->length) {
7d82af38
VP
2799 /* Move this entry to the start of the list. */
2800 if (block != QLIST_FIRST(&ram_list.blocks)) {
2801 QLIST_REMOVE(block, next);
2802 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2803 }
868bb33f 2804 if (xen_enabled()) {
432d268c
JN
2805 /* We need to check if the requested address is in the RAM
2806 * because we don't want to map the entire memory in QEMU.
712c2b41 2807 * In that case just map until the end of the page.
432d268c
JN
2808 */
2809 if (block->offset == 0) {
e41d7c69 2810 return xen_map_cache(addr, 0, 0);
432d268c 2811 } else if (block->host == NULL) {
e41d7c69
JK
2812 block->host =
2813 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2814 }
2815 }
f471a17e
AW
2816 return block->host + (addr - block->offset);
2817 }
94a6b54f 2818 }
f471a17e
AW
2819
2820 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2821 abort();
2822
2823 return NULL;
dc828ca1
PB
2824}
2825
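
/* Illustrative sketch, not part of exec.c: device-local use of
   qemu_get_ram_ptr() as described in the comment above, e.g. a display model
   clearing video RAM it allocated itself ('vram_offset' and 'vram_size' are
   hypothetical device-state fields). General-purpose DMA should go through
   cpu_physical_memory_rw()/cpu_physical_memory_map() instead. */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, vram_size);
    qemu_put_ram_ptr(p);
}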
b2e0a138
MT
2826/* Return a host pointer to ram allocated with qemu_ram_alloc.
2827 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2828 */
2829void *qemu_safe_ram_ptr(ram_addr_t addr)
2830{
2831 RAMBlock *block;
2832
2833 QLIST_FOREACH(block, &ram_list.blocks, next) {
2834 if (addr - block->offset < block->length) {
868bb33f 2835 if (xen_enabled()) {
432d268c
JN
2836 /* We need to check if the requested address is in the RAM
2837 * because we don't want to map the entire memory in QEMU.
712c2b41 2838 * In that case just map until the end of the page.
432d268c
JN
2839 */
2840 if (block->offset == 0) {
e41d7c69 2841 return xen_map_cache(addr, 0, 0);
432d268c 2842 } else if (block->host == NULL) {
e41d7c69
JK
2843 block->host =
2844 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2845 }
2846 }
b2e0a138
MT
2847 return block->host + (addr - block->offset);
2848 }
2849 }
2850
2851 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2852 abort();
2853
2854 return NULL;
2855}
2856
38bee5dc
SS
2857/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2858 * but takes a size argument */
8ab934f9 2859void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2860{
8ab934f9
SS
2861 if (*size == 0) {
2862 return NULL;
2863 }
868bb33f 2864 if (xen_enabled()) {
e41d7c69 2865 return xen_map_cache(addr, *size, 1);
868bb33f 2866 } else {
38bee5dc
SS
2867 RAMBlock *block;
2868
2869 QLIST_FOREACH(block, &ram_list.blocks, next) {
2870 if (addr - block->offset < block->length) {
2871 if (addr - block->offset + *size > block->length)
2872 *size = block->length - addr + block->offset;
2873 return block->host + (addr - block->offset);
2874 }
2875 }
2876
2877 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2878 abort();
38bee5dc
SS
2879 }
2880}
2881
050a0ddf
AP
2882void qemu_put_ram_ptr(void *addr)
2883{
2884 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2885}
2886
e890261f 2887int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2888{
94a6b54f
PB
2889 RAMBlock *block;
2890 uint8_t *host = ptr;
2891
868bb33f 2892 if (xen_enabled()) {
e41d7c69 2893 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2894 return 0;
2895 }
2896
f471a17e 2897 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
2898 /* This case happens when the block is not mapped. */
2899 if (block->host == NULL) {
2900 continue;
2901 }
f471a17e 2902 if (host - block->host < block->length) {
e890261f
MT
2903 *ram_addr = block->offset + (host - block->host);
2904 return 0;
f471a17e 2905 }
94a6b54f 2906 }
432d268c 2907
e890261f
MT
2908 return -1;
2909}
f471a17e 2910
e890261f
MT
2911/* Some of the softmmu routines need to translate from a host pointer
2912 (typically a TLB entry) back to a ram offset. */
2913ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2914{
2915 ram_addr_t ram_addr;
f471a17e 2916
e890261f
MT
2917 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2918 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2919 abort();
2920 }
2921 return ram_addr;
5579c7f3
PB
2922}
2923
0e0df1e2
AK
2924static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2925 unsigned size)
e18231a3
BS
2926{
2927#ifdef DEBUG_UNASSIGNED
2928 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2929#endif
5b450407 2930#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2931 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2932#endif
2933 return 0;
2934}
2935
0e0df1e2
AK
2936static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2937 uint64_t val, unsigned size)
e18231a3
BS
2938{
2939#ifdef DEBUG_UNASSIGNED
0e0df1e2 2940 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2941#endif
5b450407 2942#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2943 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2944#endif
33417e70
FB
2945}
2946
0e0df1e2
AK
2947static const MemoryRegionOps unassigned_mem_ops = {
2948 .read = unassigned_mem_read,
2949 .write = unassigned_mem_write,
2950 .endianness = DEVICE_NATIVE_ENDIAN,
2951};
e18231a3 2952
0e0df1e2
AK
2953static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2954 unsigned size)
e18231a3 2955{
0e0df1e2 2956 abort();
e18231a3
BS
2957}
2958
0e0df1e2
AK
2959static void error_mem_write(void *opaque, target_phys_addr_t addr,
2960 uint64_t value, unsigned size)
e18231a3 2961{
0e0df1e2 2962 abort();
33417e70
FB
2963}
2964
0e0df1e2
AK
2965static const MemoryRegionOps error_mem_ops = {
2966 .read = error_mem_read,
2967 .write = error_mem_write,
2968 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2969};
2970
0e0df1e2
AK
2971static const MemoryRegionOps rom_mem_ops = {
2972 .read = error_mem_read,
2973 .write = unassigned_mem_write,
2974 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2975};
2976
0e0df1e2
AK
2977static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2978 uint64_t val, unsigned size)
9fa3e853 2979{
3a7d929e 2980 int dirty_flags;
f7c11b53 2981 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2982 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2983#if !defined(CONFIG_USER_ONLY)
0e0df1e2 2984 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 2985 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2986#endif
3a7d929e 2987 }
0e0df1e2
AK
2988 switch (size) {
2989 case 1:
2990 stb_p(qemu_get_ram_ptr(ram_addr), val);
2991 break;
2992 case 2:
2993 stw_p(qemu_get_ram_ptr(ram_addr), val);
2994 break;
2995 case 4:
2996 stl_p(qemu_get_ram_ptr(ram_addr), val);
2997 break;
2998 default:
2999 abort();
3a7d929e 3000 }
f23db169 3001 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3002 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3003 /* we remove the notdirty callback only if the code has been
3004 flushed */
3005 if (dirty_flags == 0xff)
2e70f6ef 3006 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3007}
3008
0e0df1e2
AK
3009static const MemoryRegionOps notdirty_mem_ops = {
3010 .read = error_mem_read,
3011 .write = notdirty_mem_write,
3012 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3013};
3014
0f459d16 3015/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3016static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3017{
9349b4f9 3018 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3019 target_ulong pc, cs_base;
3020 TranslationBlock *tb;
0f459d16 3021 target_ulong vaddr;
a1d1bb31 3022 CPUWatchpoint *wp;
06d55cc1 3023 int cpu_flags;
0f459d16 3024
06d55cc1
AL
3025 if (env->watchpoint_hit) {
3026 /* We re-entered the check after replacing the TB. Now raise
3027 * the debug interrupt so that it will trigger after the
3028 * current instruction. */
3029 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3030 return;
3031 }
2e70f6ef 3032 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3033 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3034 if ((vaddr == (wp->vaddr & len_mask) ||
3035 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3036 wp->flags |= BP_WATCHPOINT_HIT;
3037 if (!env->watchpoint_hit) {
3038 env->watchpoint_hit = wp;
3039 tb = tb_find_pc(env->mem_io_pc);
3040 if (!tb) {
3041 cpu_abort(env, "check_watchpoint: could not find TB for "
3042 "pc=%p", (void *)env->mem_io_pc);
3043 }
618ba8e6 3044 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3045 tb_phys_invalidate(tb, -1);
3046 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3047 env->exception_index = EXCP_DEBUG;
488d6577 3048 cpu_loop_exit(env);
6e140f28
AL
3049 } else {
3050 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3051 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3052 cpu_resume_from_signal(env, NULL);
6e140f28 3053 }
06d55cc1 3054 }
6e140f28
AL
3055 } else {
3056 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3057 }
3058 }
3059}
3060
6658ffb8
PB
3061/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3062 so these check for a hit then pass through to the normal out-of-line
3063 phys routines. */
1ec9b909
AK
3064static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3065 unsigned size)
6658ffb8 3066{
1ec9b909
AK
3067 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3068 switch (size) {
3069 case 1: return ldub_phys(addr);
3070 case 2: return lduw_phys(addr);
3071 case 4: return ldl_phys(addr);
3072 default: abort();
3073 }
6658ffb8
PB
3074}
3075
1ec9b909
AK
3076static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3077 uint64_t val, unsigned size)
6658ffb8 3078{
1ec9b909
AK
3079 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3080 switch (size) {
67364150
MF
3081 case 1:
3082 stb_phys(addr, val);
3083 break;
3084 case 2:
3085 stw_phys(addr, val);
3086 break;
3087 case 4:
3088 stl_phys(addr, val);
3089 break;
1ec9b909
AK
3090 default: abort();
3091 }
6658ffb8
PB
3092}
3093
1ec9b909
AK
3094static const MemoryRegionOps watch_mem_ops = {
3095 .read = watch_mem_read,
3096 .write = watch_mem_write,
3097 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3098};
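
/* Illustrative sketch, not part of exec.c: arming a data watchpoint that the
   TLB-based routines above will later catch. cpu_watchpoint_insert() is
   defined earlier in this file; the flags mirror what the gdb stub uses. */
static int example_watch_word(CPUArchState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp);
}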
6658ffb8 3099
70c68e44
AK
3100static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3101 unsigned len)
db7b5426 3102{
70c68e44 3103 subpage_t *mmio = opaque;
f6405247 3104 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3105 MemoryRegionSection *section;
db7b5426
BS
3106#if defined(DEBUG_SUBPAGE)
3107 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3108 mmio, len, addr, idx);
3109#endif
db7b5426 3110
5312bd8b
AK
3111 section = &phys_sections[mmio->sub_section[idx]];
3112 addr += mmio->base;
3113 addr -= section->offset_within_address_space;
3114 addr += section->offset_within_region;
37ec01d4 3115 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3116}
3117
70c68e44
AK
3118static void subpage_write(void *opaque, target_phys_addr_t addr,
3119 uint64_t value, unsigned len)
db7b5426 3120{
70c68e44 3121 subpage_t *mmio = opaque;
f6405247 3122 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3123 MemoryRegionSection *section;
db7b5426 3124#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3125 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3126 " idx %d value %"PRIx64"\n",
f6405247 3127 __func__, mmio, len, addr, idx, value);
db7b5426 3128#endif
f6405247 3129
5312bd8b
AK
3130 section = &phys_sections[mmio->sub_section[idx]];
3131 addr += mmio->base;
3132 addr -= section->offset_within_address_space;
3133 addr += section->offset_within_region;
37ec01d4 3134 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3135}
3136
70c68e44
AK
3137static const MemoryRegionOps subpage_ops = {
3138 .read = subpage_read,
3139 .write = subpage_write,
3140 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3141};
3142
de712f94
AK
3143static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3144 unsigned size)
56384e8b
AF
3145{
3146 ram_addr_t raddr = addr;
3147 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3148 switch (size) {
3149 case 1: return ldub_p(ptr);
3150 case 2: return lduw_p(ptr);
3151 case 4: return ldl_p(ptr);
3152 default: abort();
3153 }
56384e8b
AF
3154}
3155
de712f94
AK
3156static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3157 uint64_t value, unsigned size)
56384e8b
AF
3158{
3159 ram_addr_t raddr = addr;
3160 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3161 switch (size) {
3162 case 1: return stb_p(ptr, value);
3163 case 2: return stw_p(ptr, value);
3164 case 4: return stl_p(ptr, value);
3165 default: abort();
3166 }
56384e8b
AF
3167}
3168
de712f94
AK
3169static const MemoryRegionOps subpage_ram_ops = {
3170 .read = subpage_ram_read,
3171 .write = subpage_ram_write,
3172 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3173};
3174
c227f099 3175static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3176 uint16_t section)
db7b5426
BS
3177{
3178 int idx, eidx;
3179
3180 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3181 return -1;
3182 idx = SUBPAGE_IDX(start);
3183 eidx = SUBPAGE_IDX(end);
3184#if defined(DEBUG_SUBPAGE)
0bf9e31a 3185 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 3186 mmio, start, end, idx, eidx, section);
3187#endif
5312bd8b
AK
3188 if (memory_region_is_ram(phys_sections[section].mr)) {
3189 MemoryRegionSection new_section = phys_sections[section];
3190 new_section.mr = &io_mem_subpage_ram;
3191 section = phys_section_add(&new_section);
56384e8b 3192 }
db7b5426 3193 for (; idx <= eidx; idx++) {
5312bd8b 3194 mmio->sub_section[idx] = section;
db7b5426
BS
3195 }
3196
3197 return 0;
3198}
3199
0f0cb164 3200static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3201{
c227f099 3202 subpage_t *mmio;
db7b5426 3203
7267c094 3204 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3205
3206 mmio->base = base;
70c68e44
AK
3207 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3208 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3209 mmio->iomem.subpage = true;
db7b5426 3210#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3211 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3212 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3213#endif
0f0cb164 3214 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3215
3216 return mmio;
3217}
3218
5312bd8b
AK
3219static uint16_t dummy_section(MemoryRegion *mr)
3220{
3221 MemoryRegionSection section = {
3222 .mr = mr,
3223 .offset_within_address_space = 0,
3224 .offset_within_region = 0,
3225 .size = UINT64_MAX,
3226 };
3227
3228 return phys_section_add(&section);
3229}
3230
37ec01d4 3231MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3232{
37ec01d4 3233 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3234}
3235
e9179ce1
AK
3236static void io_mem_init(void)
3237{
0e0df1e2 3238 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3239 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3240 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3241 "unassigned", UINT64_MAX);
3242 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3243 "notdirty", UINT64_MAX);
de712f94
AK
3244 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3245 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3246 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3247 "watch", UINT64_MAX);
e9179ce1
AK
3248}
3249
50c1e149
AK
3250static void core_begin(MemoryListener *listener)
3251{
54688b1e 3252 destroy_all_mappings();
5312bd8b 3253 phys_sections_clear();
c19e8800 3254 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3255 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3256 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3257 phys_section_rom = dummy_section(&io_mem_rom);
3258 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3259}
3260
3261static void core_commit(MemoryListener *listener)
3262{
9349b4f9 3263 CPUArchState *env;
117712c3
AK
3264
3265 /* since each CPU stores ram addresses in its TLB cache, we must
3266 reset the modified entries */
3267 /* XXX: slow ! */
3268 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3269 tlb_flush(env, 1);
3270 }
50c1e149
AK
3271}
3272
93632747
AK
3273static void core_region_add(MemoryListener *listener,
3274 MemoryRegionSection *section)
3275{
4855d41a 3276 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3277}
3278
3279static void core_region_del(MemoryListener *listener,
3280 MemoryRegionSection *section)
3281{
93632747
AK
3282}
3283
50c1e149
AK
3284static void core_region_nop(MemoryListener *listener,
3285 MemoryRegionSection *section)
3286{
54688b1e 3287 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3288}
3289
93632747
AK
3290static void core_log_start(MemoryListener *listener,
3291 MemoryRegionSection *section)
3292{
3293}
3294
3295static void core_log_stop(MemoryListener *listener,
3296 MemoryRegionSection *section)
3297{
3298}
3299
3300static void core_log_sync(MemoryListener *listener,
3301 MemoryRegionSection *section)
3302{
3303}
3304
3305static void core_log_global_start(MemoryListener *listener)
3306{
3307 cpu_physical_memory_set_dirty_tracking(1);
3308}
3309
3310static void core_log_global_stop(MemoryListener *listener)
3311{
3312 cpu_physical_memory_set_dirty_tracking(0);
3313}
3314
3315static void core_eventfd_add(MemoryListener *listener,
3316 MemoryRegionSection *section,
3317 bool match_data, uint64_t data, int fd)
3318{
3319}
3320
3321static void core_eventfd_del(MemoryListener *listener,
3322 MemoryRegionSection *section,
3323 bool match_data, uint64_t data, int fd)
3324{
3325}
3326
50c1e149
AK
3327static void io_begin(MemoryListener *listener)
3328{
3329}
3330
3331static void io_commit(MemoryListener *listener)
3332{
3333}
3334
4855d41a
AK
3335static void io_region_add(MemoryListener *listener,
3336 MemoryRegionSection *section)
3337{
a2d33521
AK
3338 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3339
3340 mrio->mr = section->mr;
3341 mrio->offset = section->offset_within_region;
3342 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3343 section->offset_within_address_space, section->size);
a2d33521 3344 ioport_register(&mrio->iorange);
4855d41a
AK
3345}
3346
3347static void io_region_del(MemoryListener *listener,
3348 MemoryRegionSection *section)
3349{
3350 isa_unassign_ioport(section->offset_within_address_space, section->size);
3351}
3352
50c1e149
AK
3353static void io_region_nop(MemoryListener *listener,
3354 MemoryRegionSection *section)
3355{
3356}
3357
4855d41a
AK
3358static void io_log_start(MemoryListener *listener,
3359 MemoryRegionSection *section)
3360{
3361}
3362
3363static void io_log_stop(MemoryListener *listener,
3364 MemoryRegionSection *section)
3365{
3366}
3367
3368static void io_log_sync(MemoryListener *listener,
3369 MemoryRegionSection *section)
3370{
3371}
3372
3373static void io_log_global_start(MemoryListener *listener)
3374{
3375}
3376
3377static void io_log_global_stop(MemoryListener *listener)
3378{
3379}
3380
3381static void io_eventfd_add(MemoryListener *listener,
3382 MemoryRegionSection *section,
3383 bool match_data, uint64_t data, int fd)
3384{
3385}
3386
3387static void io_eventfd_del(MemoryListener *listener,
3388 MemoryRegionSection *section,
3389 bool match_data, uint64_t data, int fd)
3390{
3391}
3392
93632747 3393static MemoryListener core_memory_listener = {
50c1e149
AK
3394 .begin = core_begin,
3395 .commit = core_commit,
93632747
AK
3396 .region_add = core_region_add,
3397 .region_del = core_region_del,
50c1e149 3398 .region_nop = core_region_nop,
93632747
AK
3399 .log_start = core_log_start,
3400 .log_stop = core_log_stop,
3401 .log_sync = core_log_sync,
3402 .log_global_start = core_log_global_start,
3403 .log_global_stop = core_log_global_stop,
3404 .eventfd_add = core_eventfd_add,
3405 .eventfd_del = core_eventfd_del,
3406 .priority = 0,
3407};
3408
4855d41a 3409static MemoryListener io_memory_listener = {
50c1e149
AK
3410 .begin = io_begin,
3411 .commit = io_commit,
4855d41a
AK
3412 .region_add = io_region_add,
3413 .region_del = io_region_del,
50c1e149 3414 .region_nop = io_region_nop,
4855d41a
AK
3415 .log_start = io_log_start,
3416 .log_stop = io_log_stop,
3417 .log_sync = io_log_sync,
3418 .log_global_start = io_log_global_start,
3419 .log_global_stop = io_log_global_stop,
3420 .eventfd_add = io_eventfd_add,
3421 .eventfd_del = io_eventfd_del,
3422 .priority = 0,
3423};
3424
62152b8a
AK
3425static void memory_map_init(void)
3426{
7267c094 3427 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3428 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3429 set_system_memory_map(system_memory);
309cb471 3430
7267c094 3431 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3432 memory_region_init(system_io, "io", 65536);
3433 set_system_io_map(system_io);
93632747 3434
4855d41a
AK
3435 memory_listener_register(&core_memory_listener, system_memory);
3436 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3437}
3438
3439MemoryRegion *get_system_memory(void)
3440{
3441 return system_memory;
3442}
3443
309cb471
AK
3444MemoryRegion *get_system_io(void)
3445{
3446 return system_io;
3447}
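
/* Illustrative sketch, not part of exec.c: how board code typically hangs RAM
   off the region returned by get_system_memory() (names are made up;
   memory_region_init_ram() and memory_region_add_subregion() live in
   memory.c). */
static void example_board_init_ram(ram_addr_t ram_size)
{
    MemoryRegion *ram = g_new(MemoryRegion, 1);

    memory_region_init_ram(ram, "board.ram", ram_size);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}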
3448
e2eef170
PB
3449#endif /* !defined(CONFIG_USER_ONLY) */
3450
13eb76e0
FB
3451/* physical memory access (slow version, mainly for debug) */
3452#if defined(CONFIG_USER_ONLY)
9349b4f9 3453int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3454 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3455{
3456 int l, flags;
3457 target_ulong page;
53a5960a 3458 void * p;
13eb76e0
FB
3459
3460 while (len > 0) {
3461 page = addr & TARGET_PAGE_MASK;
3462 l = (page + TARGET_PAGE_SIZE) - addr;
3463 if (l > len)
3464 l = len;
3465 flags = page_get_flags(page);
3466 if (!(flags & PAGE_VALID))
a68fe89c 3467 return -1;
13eb76e0
FB
3468 if (is_write) {
3469 if (!(flags & PAGE_WRITE))
a68fe89c 3470 return -1;
579a97f7 3471 /* XXX: this code should not depend on lock_user */
72fb7daa 3472 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3473 return -1;
72fb7daa
AJ
3474 memcpy(p, buf, l);
3475 unlock_user(p, addr, l);
13eb76e0
FB
3476 } else {
3477 if (!(flags & PAGE_READ))
a68fe89c 3478 return -1;
579a97f7 3479 /* XXX: this code should not depend on lock_user */
72fb7daa 3480 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3481 return -1;
72fb7daa 3482 memcpy(buf, p, l);
5b257578 3483 unlock_user(p, addr, 0);
13eb76e0
FB
3484 }
3485 len -= l;
3486 buf += l;
3487 addr += l;
3488 }
a68fe89c 3489 return 0;
13eb76e0 3490}
8df1cd07 3491
13eb76e0 3492#else
c227f099 3493void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3494 int len, int is_write)
3495{
37ec01d4 3496 int l;
13eb76e0
FB
3497 uint8_t *ptr;
3498 uint32_t val;
c227f099 3499 target_phys_addr_t page;
f3705d53 3500 MemoryRegionSection *section;
3b46e624 3501
13eb76e0
FB
3502 while (len > 0) {
3503 page = addr & TARGET_PAGE_MASK;
3504 l = (page + TARGET_PAGE_SIZE) - addr;
3505 if (l > len)
3506 l = len;
06ef3525 3507 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3508
13eb76e0 3509 if (is_write) {
f3705d53 3510 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3511 target_phys_addr_t addr1;
cc5bea60 3512 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3513 /* XXX: could force cpu_single_env to NULL to avoid
3514 potential bugs */
6c2934db 3515 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3516 /* 32 bit write access */
c27004ec 3517 val = ldl_p(buf);
37ec01d4 3518 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3519 l = 4;
6c2934db 3520 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3521 /* 16 bit write access */
c27004ec 3522 val = lduw_p(buf);
37ec01d4 3523 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3524 l = 2;
3525 } else {
1c213d19 3526 /* 8 bit write access */
c27004ec 3527 val = ldub_p(buf);
37ec01d4 3528 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3529 l = 1;
3530 }
f3705d53 3531 } else if (!section->readonly) {
8ca5692d 3532 ram_addr_t addr1;
f3705d53 3533 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3534 + memory_region_section_addr(section, addr);
13eb76e0 3535 /* RAM case */
5579c7f3 3536 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3537 memcpy(ptr, buf, l);
3a7d929e
FB
3538 if (!cpu_physical_memory_is_dirty(addr1)) {
3539 /* invalidate code */
3540 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3541 /* set dirty bit */
f7c11b53
YT
3542 cpu_physical_memory_set_dirty_flags(
3543 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3544 }
050a0ddf 3545 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3546 }
3547 } else {
cc5bea60
BS
3548 if (!(memory_region_is_ram(section->mr) ||
3549 memory_region_is_romd(section->mr))) {
f1f6e3b8 3550 target_phys_addr_t addr1;
13eb76e0 3551 /* I/O case */
cc5bea60 3552 addr1 = memory_region_section_addr(section, addr);
6c2934db 3553 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3554 /* 32 bit read access */
37ec01d4 3555 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3556 stl_p(buf, val);
13eb76e0 3557 l = 4;
6c2934db 3558 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3559 /* 16 bit read access */
37ec01d4 3560 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3561 stw_p(buf, val);
13eb76e0
FB
3562 l = 2;
3563 } else {
1c213d19 3564 /* 8 bit read access */
37ec01d4 3565 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3566 stb_p(buf, val);
13eb76e0
FB
3567 l = 1;
3568 }
3569 } else {
3570 /* RAM case */
0a1b357f 3571 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3572 + memory_region_section_addr(section,
3573 addr));
f3705d53 3574 memcpy(buf, ptr, l);
050a0ddf 3575 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3576 }
3577 }
3578 len -= l;
3579 buf += l;
3580 addr += l;
3581 }
3582}
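
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * A device model can use cpu_physical_memory_rw() above, or the
 * cpu_physical_memory_read()/write() wrappers built on it, to copy a small
 * descriptor out of guest memory.  "desc_gpa" and the descriptor layout are
 * hypothetical. */
static void example_fetch_descriptor(target_phys_addr_t desc_gpa)
{
    uint8_t desc[16];
    uint8_t status = 1;

    /* equivalent to cpu_physical_memory_rw(desc_gpa, desc, sizeof(desc), 0) */
    cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));

    /* ... interpret the descriptor, then write back a status byte ... */
    cpu_physical_memory_write(desc_gpa + 12, &status, 1);
}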
8df1cd07 3583
d0ecd2aa 3584/* used for ROM loading: can write in RAM and ROM */
c227f099 3585void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3586 const uint8_t *buf, int len)
3587{
3588 int l;
3589 uint8_t *ptr;
c227f099 3590 target_phys_addr_t page;
f3705d53 3591 MemoryRegionSection *section;
3b46e624 3592
d0ecd2aa
FB
3593 while (len > 0) {
3594 page = addr & TARGET_PAGE_MASK;
3595 l = (page + TARGET_PAGE_SIZE) - addr;
3596 if (l > len)
3597 l = len;
06ef3525 3598 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3599
cc5bea60
BS
3600 if (!(memory_region_is_ram(section->mr) ||
3601 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3602 /* do nothing */
3603 } else {
3604 unsigned long addr1;
f3705d53 3605 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3606 + memory_region_section_addr(section, addr);
d0ecd2aa 3607 /* ROM/RAM case */
5579c7f3 3608 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3609 memcpy(ptr, buf, l);
050a0ddf 3610 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3611 }
3612 len -= l;
3613 buf += l;
3614 addr += l;
3615 }
3616}
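
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * Loader/board code goes through cpu_physical_memory_write_rom() above so
 * that images can land in regions whose sections are marked read-only; a
 * plain cpu_physical_memory_write() would skip those.  "blob", "blob_size"
 * and "rom_base" are hypothetical. */
static void example_install_firmware(const uint8_t *blob, int blob_size,
                                     target_phys_addr_t rom_base)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}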
3617
6d16c2f8
AL
3618typedef struct {
3619 void *buffer;
c227f099
AL
3620 target_phys_addr_t addr;
3621 target_phys_addr_t len;
6d16c2f8
AL
3622} BounceBuffer;
3623
3624static BounceBuffer bounce;
3625
ba223c29
AL
3626typedef struct MapClient {
3627 void *opaque;
3628 void (*callback)(void *opaque);
72cf2d4f 3629 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3630} MapClient;
3631
72cf2d4f
BS
3632static QLIST_HEAD(map_client_list, MapClient) map_client_list
3633 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3634
3635void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3636{
7267c094 3637 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3638
3639 client->opaque = opaque;
3640 client->callback = callback;
72cf2d4f 3641 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3642 return client;
3643}
3644
3645void cpu_unregister_map_client(void *_client)
3646{
3647 MapClient *client = (MapClient *)_client;
3648
72cf2d4f 3649 QLIST_REMOVE(client, link);
7267c094 3650 g_free(client);
ba223c29
AL
3651}
3652
3653static void cpu_notify_map_clients(void)
3654{
3655 MapClient *client;
3656
72cf2d4f
BS
3657 while (!QLIST_EMPTY(&map_client_list)) {
3658 client = QLIST_FIRST(&map_client_list);
ba223c29 3659 client->callback(client->opaque);
34d5e948 3660 cpu_unregister_map_client(client);
ba223c29
AL
3661 }
3662}
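
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * When cpu_physical_memory_map() fails (typically because the single bounce
 * buffer is in use), a caller can register a map client; the callback is run
 * from cpu_notify_map_clients() once cpu_physical_memory_unmap() releases the
 * bounce buffer, and the client is unregistered right after the callback
 * returns.  "ExampleDMARequest" and "example_dma_retry" are hypothetical. */
typedef struct ExampleDMARequest {
    target_phys_addr_t addr;
    target_phys_addr_t len;
    void *map_client;
} ExampleDMARequest;

static void example_dma_retry(void *opaque)
{
    ExampleDMARequest *req = opaque;

    req->map_client = NULL;
    /* ... retry cpu_physical_memory_map(req->addr, &req->len, ...) here ... */
}

static void example_dma_map_failed(ExampleDMARequest *req)
{
    req->map_client = cpu_register_map_client(req, example_dma_retry);
}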
3663
6d16c2f8
AL
3664/* Map a physical memory region into a host virtual address.
3665 * May map a subset of the requested range, given by and returned in *plen.
3666 * May return NULL if resources needed to perform the mapping are exhausted.
3667 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3668 * Use cpu_register_map_client() to know when retrying the map operation is
3669 * likely to succeed.
6d16c2f8 3670 */
c227f099
AL
3671void *cpu_physical_memory_map(target_phys_addr_t addr,
3672 target_phys_addr_t *plen,
6d16c2f8
AL
3673 int is_write)
3674{
c227f099 3675 target_phys_addr_t len = *plen;
38bee5dc 3676 target_phys_addr_t todo = 0;
6d16c2f8 3677 int l;
c227f099 3678 target_phys_addr_t page;
f3705d53 3679 MemoryRegionSection *section;
f15fbc4b 3680 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3681 ram_addr_t rlen;
3682 void *ret;
6d16c2f8
AL
3683
3684 while (len > 0) {
3685 page = addr & TARGET_PAGE_MASK;
3686 l = (page + TARGET_PAGE_SIZE) - addr;
3687 if (l > len)
3688 l = len;
06ef3525 3689 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3690
f3705d53 3691 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3692 if (todo || bounce.buffer) {
6d16c2f8
AL
3693 break;
3694 }
3695 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3696 bounce.addr = addr;
3697 bounce.len = l;
3698 if (!is_write) {
54f7b4a3 3699 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3700 }
38bee5dc
SS
3701
3702 *plen = l;
3703 return bounce.buffer;
6d16c2f8 3704 }
8ab934f9 3705 if (!todo) {
f3705d53 3706 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3707 + memory_region_section_addr(section, addr);
8ab934f9 3708 }
6d16c2f8
AL
3709
3710 len -= l;
3711 addr += l;
38bee5dc 3712 todo += l;
6d16c2f8 3713 }
8ab934f9
SS
3714 rlen = todo;
3715 ret = qemu_ram_ptr_length(raddr, &rlen);
3716 *plen = rlen;
3717 return ret;
6d16c2f8
AL
3718}
3719
3720/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3721 * Will also mark the memory as dirty if is_write == 1. access_len gives
3722 * the amount of memory that was actually read or written by the caller.
3723 */
c227f099
AL
3724void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3725 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3726{
3727 if (buffer != bounce.buffer) {
3728 if (is_write) {
e890261f 3729 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3730 while (access_len) {
3731 unsigned l;
3732 l = TARGET_PAGE_SIZE;
3733 if (l > access_len)
3734 l = access_len;
3735 if (!cpu_physical_memory_is_dirty(addr1)) {
3736 /* invalidate code */
3737 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3738 /* set dirty bit */
f7c11b53
YT
3739 cpu_physical_memory_set_dirty_flags(
3740 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3741 }
3742 addr1 += l;
3743 access_len -= l;
3744 }
3745 }
868bb33f 3746 if (xen_enabled()) {
e41d7c69 3747 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3748 }
6d16c2f8
AL
3749 return;
3750 }
3751 if (is_write) {
3752 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3753 }
f8a83245 3754 qemu_vfree(bounce.buffer);
6d16c2f8 3755 bounce.buffer = NULL;
ba223c29 3756 cpu_notify_map_clients();
6d16c2f8 3757}
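
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * The usual zero-copy DMA pattern pairs cpu_physical_memory_map() with
 * cpu_physical_memory_unmap() above.  When the target range is not plain RAM
 * the map falls back to the single bounce buffer, so callers must cope with
 * a NULL result or a shortened length.  "sg_addr", "sg_len" and "data" are
 * hypothetical. */
static int example_dma_write(target_phys_addr_t sg_addr,
                             target_phys_addr_t sg_len,
                             const uint8_t *data)
{
    target_phys_addr_t plen = sg_len;
    void *host = cpu_physical_memory_map(sg_addr, &plen, 1 /* is_write */);

    if (!host) {
        return -1;              /* bounce buffer busy: retry via a map client */
    }
    memcpy(host, data, plen);   /* plen may come back smaller than sg_len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == sg_len ? 0 : -1;
}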
d0ecd2aa 3758
8df1cd07 3759/* warning: addr must be aligned */
1e78bcc1
AG
3760static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3761 enum device_endian endian)
8df1cd07 3762{
8df1cd07
FB
3763 uint8_t *ptr;
3764 uint32_t val;
f3705d53 3765 MemoryRegionSection *section;
8df1cd07 3766
06ef3525 3767 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3768
cc5bea60
BS
3769 if (!(memory_region_is_ram(section->mr) ||
3770 memory_region_is_romd(section->mr))) {
8df1cd07 3771 /* I/O case */
cc5bea60 3772 addr = memory_region_section_addr(section, addr);
37ec01d4 3773 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3774#if defined(TARGET_WORDS_BIGENDIAN)
3775 if (endian == DEVICE_LITTLE_ENDIAN) {
3776 val = bswap32(val);
3777 }
3778#else
3779 if (endian == DEVICE_BIG_ENDIAN) {
3780 val = bswap32(val);
3781 }
3782#endif
8df1cd07
FB
3783 } else {
3784 /* RAM case */
f3705d53 3785 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3786 & TARGET_PAGE_MASK)
cc5bea60 3787 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3788 switch (endian) {
3789 case DEVICE_LITTLE_ENDIAN:
3790 val = ldl_le_p(ptr);
3791 break;
3792 case DEVICE_BIG_ENDIAN:
3793 val = ldl_be_p(ptr);
3794 break;
3795 default:
3796 val = ldl_p(ptr);
3797 break;
3798 }
8df1cd07
FB
3799 }
3800 return val;
3801}
3802
1e78bcc1
AG
3803uint32_t ldl_phys(target_phys_addr_t addr)
3804{
3805 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3806}
3807
3808uint32_t ldl_le_phys(target_phys_addr_t addr)
3809{
3810 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3811}
3812
3813uint32_t ldl_be_phys(target_phys_addr_t addr)
3814{
3815 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3816}
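
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * A device whose guest-visible registers are defined as little-endian reads
 * them with the _le_ accessor above, which byte-swaps only when the target is
 * big-endian; ldl_phys() instead returns the value in the target's native
 * byte order.  "desc_gpa" is hypothetical. */
static uint32_t example_read_le_word(target_phys_addr_t desc_gpa)
{
    return ldl_le_phys(desc_gpa);
}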
3817
84b7b8e7 3818/* warning: addr must be aligned */
1e78bcc1
AG
3819static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3820 enum device_endian endian)
84b7b8e7 3821{
84b7b8e7
FB
3822 uint8_t *ptr;
3823 uint64_t val;
f3705d53 3824 MemoryRegionSection *section;
84b7b8e7 3825
06ef3525 3826 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3827
cc5bea60
BS
3828 if (!(memory_region_is_ram(section->mr) ||
3829 memory_region_is_romd(section->mr))) {
84b7b8e7 3830 /* I/O case */
cc5bea60 3831 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3832
3833 /* XXX: This is broken when device endian != cpu endian.
3834 Fix and add "endian" variable check */
84b7b8e7 3835#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3836 val = io_mem_read(section->mr, addr, 4) << 32;
3837 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3838#else
37ec01d4
AK
3839 val = io_mem_read(section->mr, addr, 4);
3840 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3841#endif
3842 } else {
3843 /* RAM case */
f3705d53 3844 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3845 & TARGET_PAGE_MASK)
cc5bea60 3846 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3847 switch (endian) {
3848 case DEVICE_LITTLE_ENDIAN:
3849 val = ldq_le_p(ptr);
3850 break;
3851 case DEVICE_BIG_ENDIAN:
3852 val = ldq_be_p(ptr);
3853 break;
3854 default:
3855 val = ldq_p(ptr);
3856 break;
3857 }
84b7b8e7
FB
3858 }
3859 return val;
3860}
3861
1e78bcc1
AG
3862uint64_t ldq_phys(target_phys_addr_t addr)
3863{
3864 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3865}
3866
3867uint64_t ldq_le_phys(target_phys_addr_t addr)
3868{
3869 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3870}
3871
3872uint64_t ldq_be_phys(target_phys_addr_t addr)
3873{
3874 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3875}
3876
aab33094 3877/* XXX: optimize */
c227f099 3878uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3879{
3880 uint8_t val;
3881 cpu_physical_memory_read(addr, &val, 1);
3882 return val;
3883}
3884
733f0b02 3885/* warning: addr must be aligned */
1e78bcc1
AG
3886static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3887 enum device_endian endian)
aab33094 3888{
733f0b02
MT
3889 uint8_t *ptr;
3890 uint64_t val;
f3705d53 3891 MemoryRegionSection *section;
733f0b02 3892
06ef3525 3893 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3894
cc5bea60
BS
3895 if (!(memory_region_is_ram(section->mr) ||
3896 memory_region_is_romd(section->mr))) {
733f0b02 3897 /* I/O case */
cc5bea60 3898 addr = memory_region_section_addr(section, addr);
37ec01d4 3899 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3900#if defined(TARGET_WORDS_BIGENDIAN)
3901 if (endian == DEVICE_LITTLE_ENDIAN) {
3902 val = bswap16(val);
3903 }
3904#else
3905 if (endian == DEVICE_BIG_ENDIAN) {
3906 val = bswap16(val);
3907 }
3908#endif
733f0b02
MT
3909 } else {
3910 /* RAM case */
f3705d53 3911 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3912 & TARGET_PAGE_MASK)
cc5bea60 3913 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3914 switch (endian) {
3915 case DEVICE_LITTLE_ENDIAN:
3916 val = lduw_le_p(ptr);
3917 break;
3918 case DEVICE_BIG_ENDIAN:
3919 val = lduw_be_p(ptr);
3920 break;
3921 default:
3922 val = lduw_p(ptr);
3923 break;
3924 }
733f0b02
MT
3925 }
3926 return val;
aab33094
FB
3927}
3928
1e78bcc1
AG
3929uint32_t lduw_phys(target_phys_addr_t addr)
3930{
3931 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3932}
3933
3934uint32_t lduw_le_phys(target_phys_addr_t addr)
3935{
3936 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3937}
3938
3939uint32_t lduw_be_phys(target_phys_addr_t addr)
3940{
3941 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3942}
3943
8df1cd07
FB
3944/* warning: addr must be aligned. The ram page is not marked as dirty
3945 and the code inside is not invalidated. It is useful if the dirty
3946 bits are used to track modified PTEs */
c227f099 3947void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3948{
8df1cd07 3949 uint8_t *ptr;
f3705d53 3950 MemoryRegionSection *section;
8df1cd07 3951
06ef3525 3952 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3953
f3705d53 3954 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3955 addr = memory_region_section_addr(section, addr);
f3705d53 3956 if (memory_region_is_ram(section->mr)) {
37ec01d4 3957 section = &phys_sections[phys_section_rom];
06ef3525 3958 }
37ec01d4 3959 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3960 } else {
f3705d53 3961 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3962 & TARGET_PAGE_MASK)
cc5bea60 3963 + memory_region_section_addr(section, addr);
5579c7f3 3964 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3965 stl_p(ptr, val);
74576198
AL
3966
3967 if (unlikely(in_migration)) {
3968 if (!cpu_physical_memory_is_dirty(addr1)) {
3969 /* invalidate code */
3970 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3971 /* set dirty bit */
f7c11b53
YT
3972 cpu_physical_memory_set_dirty_flags(
3973 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3974 }
3975 }
8df1cd07
FB
3976 }
3977}
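
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * Softmmu page-table walkers (the x86 one, for instance) use the _notdirty
 * store above to set accessed/dirty bits in a guest PTE without flagging the
 * page dirty or invalidating translated code on it.  "pte_addr" and "pte"
 * are hypothetical; 0x20 is the x86 PTE accessed bit. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20);
}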
3978
c227f099 3979void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3980{
bc98a7ef 3981 uint8_t *ptr;
f3705d53 3982 MemoryRegionSection *section;
bc98a7ef 3983
06ef3525 3984 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3985
f3705d53 3986 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3987 addr = memory_region_section_addr(section, addr);
f3705d53 3988 if (memory_region_is_ram(section->mr)) {
37ec01d4 3989 section = &phys_sections[phys_section_rom];
06ef3525 3990 }
bc98a7ef 3991#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3992 io_mem_write(section->mr, addr, val >> 32, 4);
3993 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 3994#else
37ec01d4
AK
3995 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3996 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
3997#endif
3998 } else {
f3705d53 3999 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4000 & TARGET_PAGE_MASK)
cc5bea60 4001 + memory_region_section_addr(section, addr));
bc98a7ef
JM
4002 stq_p(ptr, val);
4003 }
4004}
4005
8df1cd07 4006/* warning: addr must be aligned */
1e78bcc1
AG
4007static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4008 enum device_endian endian)
8df1cd07 4009{
8df1cd07 4010 uint8_t *ptr;
f3705d53 4011 MemoryRegionSection *section;
8df1cd07 4012
06ef3525 4013 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4014
f3705d53 4015 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4016 addr = memory_region_section_addr(section, addr);
f3705d53 4017 if (memory_region_is_ram(section->mr)) {
37ec01d4 4018 section = &phys_sections[phys_section_rom];
06ef3525 4019 }
1e78bcc1
AG
4020#if defined(TARGET_WORDS_BIGENDIAN)
4021 if (endian == DEVICE_LITTLE_ENDIAN) {
4022 val = bswap32(val);
4023 }
4024#else
4025 if (endian == DEVICE_BIG_ENDIAN) {
4026 val = bswap32(val);
4027 }
4028#endif
37ec01d4 4029 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4030 } else {
4031 unsigned long addr1;
f3705d53 4032 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4033 + memory_region_section_addr(section, addr);
8df1cd07 4034 /* RAM case */
5579c7f3 4035 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4036 switch (endian) {
4037 case DEVICE_LITTLE_ENDIAN:
4038 stl_le_p(ptr, val);
4039 break;
4040 case DEVICE_BIG_ENDIAN:
4041 stl_be_p(ptr, val);
4042 break;
4043 default:
4044 stl_p(ptr, val);
4045 break;
4046 }
3a7d929e
FB
4047 if (!cpu_physical_memory_is_dirty(addr1)) {
4048 /* invalidate code */
4049 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4050 /* set dirty bit */
f7c11b53
YT
4051 cpu_physical_memory_set_dirty_flags(addr1,
4052 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4053 }
8df1cd07
FB
4054 }
4055}
4056
1e78bcc1
AG
4057void stl_phys(target_phys_addr_t addr, uint32_t val)
4058{
4059 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4060}
4061
4062void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4063{
4064 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4065}
4066
4067void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4068{
4069 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4070}
4071
aab33094 4072/* XXX: optimize */
c227f099 4073void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4074{
4075 uint8_t v = val;
4076 cpu_physical_memory_write(addr, &v, 1);
4077}
4078
733f0b02 4079/* warning: addr must be aligned */
1e78bcc1
AG
4080static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4081 enum device_endian endian)
aab33094 4082{
733f0b02 4083 uint8_t *ptr;
f3705d53 4084 MemoryRegionSection *section;
733f0b02 4085
06ef3525 4086 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4087
f3705d53 4088 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4089 addr = memory_region_section_addr(section, addr);
f3705d53 4090 if (memory_region_is_ram(section->mr)) {
37ec01d4 4091 section = &phys_sections[phys_section_rom];
06ef3525 4092 }
1e78bcc1
AG
4093#if defined(TARGET_WORDS_BIGENDIAN)
4094 if (endian == DEVICE_LITTLE_ENDIAN) {
4095 val = bswap16(val);
4096 }
4097#else
4098 if (endian == DEVICE_BIG_ENDIAN) {
4099 val = bswap16(val);
4100 }
4101#endif
37ec01d4 4102 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4103 } else {
4104 unsigned long addr1;
f3705d53 4105 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4106 + memory_region_section_addr(section, addr);
733f0b02
MT
4107 /* RAM case */
4108 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4109 switch (endian) {
4110 case DEVICE_LITTLE_ENDIAN:
4111 stw_le_p(ptr, val);
4112 break;
4113 case DEVICE_BIG_ENDIAN:
4114 stw_be_p(ptr, val);
4115 break;
4116 default:
4117 stw_p(ptr, val);
4118 break;
4119 }
733f0b02
MT
4120 if (!cpu_physical_memory_is_dirty(addr1)) {
4121 /* invalidate code */
4122 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4123 /* set dirty bit */
4124 cpu_physical_memory_set_dirty_flags(addr1,
4125 (0xff & ~CODE_DIRTY_FLAG));
4126 }
4127 }
aab33094
FB
4128}
4129
1e78bcc1
AG
4130void stw_phys(target_phys_addr_t addr, uint32_t val)
4131{
4132 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4133}
4134
4135void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4136{
4137 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4138}
4139
4140void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4141{
4142 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4143}
4144
aab33094 4145/* XXX: optimize */
c227f099 4146void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4147{
4148 val = tswap64(val);
71d2b725 4149 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4150}
4151
1e78bcc1
AG
4152void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4153{
4154 val = cpu_to_le64(val);
4155 cpu_physical_memory_write(addr, &val, 8);
4156}
4157
4158void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4159{
4160 val = cpu_to_be64(val);
4161 cpu_physical_memory_write(addr, &val, 8);
4162}
4163
5e2972fd 4164/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4165int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4166 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4167{
4168 int l;
c227f099 4169 target_phys_addr_t phys_addr;
9b3c35e0 4170 target_ulong page;
13eb76e0
FB
4171
4172 while (len > 0) {
4173 page = addr & TARGET_PAGE_MASK;
4174 phys_addr = cpu_get_phys_page_debug(env, page);
4175 /* if no physical page mapped, return an error */
4176 if (phys_addr == -1)
4177 return -1;
4178 l = (page + TARGET_PAGE_SIZE) - addr;
4179 if (l > len)
4180 l = len;
5e2972fd 4181 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4182 if (is_write)
4183 cpu_physical_memory_write_rom(phys_addr, buf, l);
4184 else
5e2972fd 4185 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4186 len -= l;
4187 buf += l;
4188 addr += l;
4189 }
4190 return 0;
4191}
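
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * This debug path is what the gdb stub uses: the guest-virtual address is
 * translated page by page with cpu_get_phys_page_debug(), and writes go via
 * cpu_physical_memory_write_rom() so breakpoints can be planted even in ROM.
 * "example_debug_read_u32" and its parameters are hypothetical. */
static uint32_t example_debug_read_u32(CPUArchState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;               /* no physical page mapped at this address */
    }
    return ldl_p(buf);          /* interpret in target byte order */
}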
a68fe89c 4192#endif
13eb76e0 4193
2e70f6ef
PB
4194/* in deterministic execution mode, instructions doing device I/Os
4195 must be at the end of the TB */
20503968 4196void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4197{
4198 TranslationBlock *tb;
4199 uint32_t n, cflags;
4200 target_ulong pc, cs_base;
4201 uint64_t flags;
4202
20503968 4203 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4204 if (!tb) {
4205 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4206 (void *)retaddr);
2e70f6ef
PB
4207 }
4208 n = env->icount_decr.u16.low + tb->icount;
20503968 4209 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4210 /* Calculate how many instructions had been executed before the fault
bf20dc07 4211 occurred. */
2e70f6ef
PB
4212 n = n - env->icount_decr.u16.low;
4213 /* Generate a new TB ending on the I/O insn. */
4214 n++;
4215 /* On MIPS and SH, delay slot instructions can only be restarted if
4216 they were already the first instruction in the TB. If this is not
bf20dc07 4217 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4218 branch. */
4219#if defined(TARGET_MIPS)
4220 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4221 env->active_tc.PC -= 4;
4222 env->icount_decr.u16.low++;
4223 env->hflags &= ~MIPS_HFLAG_BMASK;
4224 }
4225#elif defined(TARGET_SH4)
4226 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4227 && n > 1) {
4228 env->pc -= 2;
4229 env->icount_decr.u16.low++;
4230 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4231 }
4232#endif
4233 /* This should never happen. */
4234 if (n > CF_COUNT_MASK)
4235 cpu_abort(env, "TB too big during recompile");
4236
4237 cflags = n | CF_LAST_IO;
4238 pc = tb->pc;
4239 cs_base = tb->cs_base;
4240 flags = tb->flags;
4241 tb_phys_invalidate(tb, -1);
4242 /* FIXME: In theory this could raise an exception. In practice
4243 we have already translated the block once so it's probably ok. */
4244 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4245 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4246 the first in the TB) then we end up generating a whole new TB and
4247 repeating the fault, which is horribly inefficient.
4248 Better would be to execute just this insn uncached, or generate a
4249 second new TB. */
4250 cpu_resume_from_signal(env, NULL);
4251}
4252
b3755a91
PB
4253#if !defined(CONFIG_USER_ONLY)
4254
055403b2 4255void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4256{
4257 int i, target_code_size, max_target_code_size;
4258 int direct_jmp_count, direct_jmp2_count, cross_page;
4259 TranslationBlock *tb;
3b46e624 4260
e3db7226
FB
4261 target_code_size = 0;
4262 max_target_code_size = 0;
4263 cross_page = 0;
4264 direct_jmp_count = 0;
4265 direct_jmp2_count = 0;
4266 for(i = 0; i < nb_tbs; i++) {
4267 tb = &tbs[i];
4268 target_code_size += tb->size;
4269 if (tb->size > max_target_code_size)
4270 max_target_code_size = tb->size;
4271 if (tb->page_addr[1] != -1)
4272 cross_page++;
4273 if (tb->tb_next_offset[0] != 0xffff) {
4274 direct_jmp_count++;
4275 if (tb->tb_next_offset[1] != 0xffff) {
4276 direct_jmp2_count++;
4277 }
4278 }
4279 }
4280 /* XXX: avoid using doubles ? */
57fec1fe 4281 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4282 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4283 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4284 cpu_fprintf(f, "TB count %d/%d\n",
4285 nb_tbs, code_gen_max_blocks);
5fafdf24 4286 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4287 nb_tbs ? target_code_size / nb_tbs : 0,
4288 max_target_code_size);
055403b2 4289 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4290 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4291 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4292 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4293 cross_page,
e3db7226
FB
4294 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4295 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4296 direct_jmp_count,
e3db7226
FB
4297 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4298 direct_jmp2_count,
4299 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4300 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4301 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4302 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4303 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4304 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4305}
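
/* Editor's note: an illustrative sketch, not part of the original exec.c.
 * dump_exec_info() above is reachable from the monitor's "info jit" command;
 * any fprintf-compatible callback works, so the same statistics can also be
 * sent to stderr for ad-hoc debugging. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}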
4306
82afa586
BH
4307/*
4308 * A helper function for the _utterly broken_ virtio device model to find out if
4309 * it's running on a big endian machine. Don't do this at home kids!
4310 */
4311bool virtio_is_big_endian(void);
4312bool virtio_is_big_endian(void)
4313{
4314#if defined(TARGET_WORDS_BIGENDIAN)
4315 return true;
4316#else
4317 return false;
4318#endif
4319}
4320
61382a50 4321#endif