/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
41c1b1c9 146/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
147 while in user mode we want it to be based on virtual addresses. */
148#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
149#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151#else
5cd2c5b6 152# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 153#endif
bedb69ea 154#else
5cd2c5b6 155# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 156#endif
54936004 157
5cd2c5b6
RH
158/* Size of the L2 (and L3, etc) page tables. */
159#define L2_BITS 10
54936004
FB
160#define L2_SIZE (1 << L2_BITS)
161
3eef53df
AK
162#define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
164
5cd2c5b6 165/* The bits remaining after N lower levels of page tables. */
5cd2c5b6
RH
166#define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
5cd2c5b6
RH
169#if V_L1_BITS_REM < 4
170#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171#else
172#define V_L1_BITS V_L1_BITS_REM
173#endif
174
5cd2c5b6
RH
175#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176
5cd2c5b6
RH
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
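
/* Worked example: with L1_MAP_ADDR_SPACE_BITS == 64, TARGET_PAGE_BITS == 12
   and L2_BITS == 10, V_L1_BITS_REM == (64 - 12) % 10 == 2; since 2 < 4 the
   remainder is folded into a wider top level, giving V_L1_BITS == 12,
   V_L1_SIZE == 4096 l1_map entries and V_L1_SHIFT == 40, with
   V_L1_SHIFT / L2_BITS == 4 ten-bit levels below the first one. */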

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
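
/* A PhysPageEntry packs a leaf flag and a 15-bit index into 16 bits, so the
   node pool (and the phys_sections table indexed at the leaves) can hold at
   most 2^15 - 1 == 32767 entries; PHYS_MAP_NODE_NIL (0x7fff) marks an
   unallocated subtree, which is why phys_map_node_alloc() asserts that the
   allocation counter never reaches it. */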

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

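    /* On BSD user-mode builds, walk the host process's existing mappings and
       mark them PAGE_RESERVED so that guest allocations are not placed on
       top of pages the host is already using. */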
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

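/* Look up (and, if alloc is set, create on demand) the PageDesc for a page
   index: the top V_L1_BITS of the index select an l1_map slot, each
   intermediate level consumes a further L2_BITS, and the final level holds
   the PageDesc entries themselves. */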
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

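/* Look up a physical page: walk the radix tree from the root, consuming
   L2_BITS of the page index per level; hitting PHYS_MAP_NODE_NIL on the way
   down means the page was never mapped, and the lookup falls back to the
   phys_section_unassigned section. */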
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

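/* The dynamically allocated buffer is placed where host direct branches can
   reach it on backends that rely on that (MAP_32BIT on x86-64 Linux, fixed
   low addresses on sparc64 and s390x, a 16MB cap on ARM).  Note also that
   code_gen_buffer_max_size below keeps TCG_MAX_OP_SIZE * OPC_BUF_SIZE of
   headroom, so a TB whose translation starts just under the limit can still
   be generated without overrunning the buffer. */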
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

850
851/* invalidate one TB */
852static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
853 int next_offset)
854{
855 TranslationBlock *tb1;
856 for(;;) {
857 tb1 = *ptb;
858 if (tb1 == tb) {
859 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
860 break;
861 }
862 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
863 }
864}
865

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

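/* set_bits() marks bits [start, start + len) in the byte-array bitmap 'tab';
   build_page_bitmap() uses it to record, one bit per byte, which parts of a
   guest page are covered by translated code, so that small writes can be
   checked against the bitmap instead of invalidating unconditionally. */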
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

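/* A TB's code is contiguous in guest virtual memory but may straddle two
   guest pages, so tb_gen_code() records up to two physical page addresses
   (page_addr[1] stays -1 when the second page is unused) and tb_link_page()
   links the TB into the page list of every page it touches. */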
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

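/* tbs[] entries are handed out in generation order and code_gen_ptr only
   grows between flushes, so the array is sorted by tc_ptr; that ordering is
   what makes the binary search in tb_find_pc() valid. */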
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
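    /* (len & (len - 1)) == 0 exactly when len is a power of two, and with,
       say, len == 4, len_mask == ~3, so (addr & ~len_mask) != 0 catches an
       address that is not 4-byte aligned. */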
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
1647
c33a346e
FB
1648/* enable or disable single step mode. EXCP_DEBUG is returned by the
1649 CPU loop after each instruction */
9349b4f9 1650void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 1651{
1fddef4b 1652#if defined(TARGET_HAS_ICE)
c33a346e
FB
1653 if (env->singlestep_enabled != enabled) {
1654 env->singlestep_enabled = enabled;
e22a25c9
AL
1655 if (kvm_enabled())
1656 kvm_update_guest_debug(env, 0);
1657 else {
ccbb4d44 1658 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1659 /* XXX: only flush what is necessary */
1660 tb_flush(env);
1661 }
c33a346e
FB
1662 }
1663#endif
1664}
1665
9349b4f9 1666static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1667{
3098dba0
AJ
1668 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1669 problem and hope the cpu will stop of its own accord. For userspace
1670 emulation this often isn't actually as bad as it sounds. Often
1671 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1672 TranslationBlock *tb;
c227f099 1673 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1674
cab1b4bd 1675 spin_lock(&interrupt_lock);
3098dba0
AJ
1676 tb = env->current_tb;
1677 /* if the cpu is currently executing code, we must unlink it and
1678 all the potentially executing TB */
f76cfe56 1679 if (tb) {
3098dba0
AJ
1680 env->current_tb = NULL;
1681 tb_reset_jump_recursive(tb);
be214e6c 1682 }
cab1b4bd 1683 spin_unlock(&interrupt_lock);
3098dba0
AJ
1684}
1685
97ffbd8d 1686#ifndef CONFIG_USER_ONLY
3098dba0 1687/* mask must never be zero, except for A20 change call */
9349b4f9 1688static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1689{
1690 int old_mask;
be214e6c 1691
2e70f6ef 1692 old_mask = env->interrupt_request;
68a79315 1693 env->interrupt_request |= mask;
3098dba0 1694
8edac960
AL
1695 /*
1696 * If called from iothread context, wake the target cpu in
1697 * case its halted.
1698 */
b7680cb6 1699 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1700 qemu_cpu_kick(env);
1701 return;
1702 }
8edac960 1703
2e70f6ef 1704 if (use_icount) {
266910c4 1705 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1706 if (!can_do_io(env)
be214e6c 1707 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1708 cpu_abort(env, "Raised interrupt while not in I/O function");
1709 }
2e70f6ef 1710 } else {
3098dba0 1711 cpu_unlink_tb(env);
ea041c0e
FB
1712 }
1713}
1714
ec6959d0
JK
1715CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1716
97ffbd8d
JK
1717#else /* CONFIG_USER_ONLY */
1718
9349b4f9 1719void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1720{
1721 env->interrupt_request |= mask;
1722 cpu_unlink_tb(env);
1723}
1724#endif /* CONFIG_USER_ONLY */
1725
9349b4f9 1726void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1727{
1728 env->interrupt_request &= ~mask;
1729}
1730
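/* Editor's sketch: the producer side of interrupt_request. A device model
 * asserting a level-triggered line raises CPU_INTERRUPT_HARD (from
 * cpu-all.h) and clears it again on deassert. */
#if 0
static void irq_line_set(CPUArchState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
    }
}
#endif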
9349b4f9 1731void cpu_exit(CPUArchState *env)
3098dba0
AJ
1732{
1733 env->exit_request = 1;
1734 cpu_unlink_tb(env);
1735}
1736
9349b4f9 1737void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1738{
1739 va_list ap;
493ae1f0 1740 va_list ap2;
7501267e
FB
1741
1742 va_start(ap, fmt);
493ae1f0 1743 va_copy(ap2, ap);
7501267e
FB
1744 fprintf(stderr, "qemu: fatal: ");
1745 vfprintf(stderr, fmt, ap);
1746 fprintf(stderr, "\n");
1747#ifdef TARGET_I386
7fe48483
FB
1748 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1749#else
1750 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1751#endif
93fcfe39
AL
1752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt, ap2);
1755 qemu_log("\n");
f9373291 1756#ifdef TARGET_I386
93fcfe39 1757 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1758#else
93fcfe39 1759 log_cpu_state(env, 0);
f9373291 1760#endif
31b1a7b4 1761 qemu_log_flush();
93fcfe39 1762 qemu_log_close();
924edcae 1763 }
493ae1f0 1764 va_end(ap2);
f9373291 1765 va_end(ap);
fd052bf6
RV
1766#if defined(CONFIG_USER_ONLY)
1767 {
1768 struct sigaction act;
1769 sigfillset(&act.sa_mask);
1770 act.sa_handler = SIG_DFL;
1771 sigaction(SIGABRT, &act, NULL);
1772 }
1773#endif
7501267e
FB
1774 abort();
1775}
1776
9349b4f9 1777CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1778{
9349b4f9
AF
1779 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1780 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1781 int cpu_index = new_env->cpu_index;
5a38f081
AL
1782#if defined(TARGET_HAS_ICE)
1783 CPUBreakpoint *bp;
1784 CPUWatchpoint *wp;
1785#endif
1786
9349b4f9 1787 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1788
1789 /* Preserve chaining and index. */
c5be9f08
TS
1790 new_env->next_cpu = next_cpu;
1791 new_env->cpu_index = cpu_index;
5a38f081
AL
1792
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1796 QTAILQ_INIT(&env->breakpoints);
1797 QTAILQ_INIT(&env->watchpoints);
5a38f081 1798#if defined(TARGET_HAS_ICE)
72cf2d4f 1799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1800 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1801 }
72cf2d4f 1802 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1803 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1804 wp->flags, NULL);
1805 }
1806#endif
1807
c5be9f08
TS
1808 return new_env;
1809}
1810
0124311e 1811#if !defined(CONFIG_USER_ONLY)
0cac1b66 1812void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1813{
1814 unsigned int i;
1815
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1821
1822 i = tb_jmp_cache_hash_page(addr);
1823 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1825}
1826
d24981d3
JQ
1827static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1828 uintptr_t length)
1829{
1830 uintptr_t start1;
1831
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1835 /* Check that we don't span multiple blocks - this breaks the
1836 address comparisons below. */
1837 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1838 != (end - 1) - start) {
1839 abort();
1840 }
1841 cpu_tlb_reset_dirty_all(start1, length);
1842
1843}
1844
5579c7f3 1845/* Note: start and end must be within the same ram block. */
c227f099 1846void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1847 int dirty_flags)
1ccde1cb 1848{
d24981d3 1849 uintptr_t length;
1ccde1cb
FB
1850
1851 start &= TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1853
1854 length = end - start;
1855 if (length == 0)
1856 return;
f7c11b53 1857 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1858
d24981d3
JQ
1859 if (tcg_enabled()) {
1860 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 1861 }
1ccde1cb
FB
1862}
1863
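/* Editor's sketch of a migration-style consumer, assuming the
 * MIGRATION_DIRTY_FLAG bit and a caller-supplied page address 'addr':
 * after saving a dirty page, clear its bits so the next pass only sees
 * pages written since. */
#if 0
if (cpu_physical_memory_get_dirty_flags(addr) & MIGRATION_DIRTY_FLAG) {
    /* ... copy out the page at addr ... */
    cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                    MIGRATION_DIRTY_FLAG);
}
#endif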
74576198
AL
1864int cpu_physical_memory_set_dirty_tracking(int enable)
1865{
f6f3fbca 1866 int ret = 0;
74576198 1867 in_migration = enable;
f6f3fbca 1868 return ret;
74576198
AL
1869}
1870
e5548617
BS
1871target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1872 MemoryRegionSection *section,
1873 target_ulong vaddr,
1874 target_phys_addr_t paddr,
1875 int prot,
1876 target_ulong *address)
1877{
1878 target_phys_addr_t iotlb;
1879 CPUWatchpoint *wp;
1880
cc5bea60 1881 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1882 /* Normal RAM. */
1883 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1884 + memory_region_section_addr(section, paddr);
e5548617
BS
1885 if (!section->readonly) {
1886 iotlb |= phys_section_notdirty;
1887 } else {
1888 iotlb |= phys_section_rom;
1889 }
1890 } else {
1891 /* IO handlers are currently passed a physical address.
1892 It would be nice to pass an offset from the base address
1893 of that region. This would avoid having to special case RAM,
1894 and avoid full address decoding in every device.
1895 We can't use the high bits of pd for this because
1896 IO_MEM_ROMD uses these as a ram address. */
1897 iotlb = section - phys_sections;
cc5bea60 1898 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
1899 }
1900
1901 /* Make accesses to pages with watchpoints go via the
1902 watchpoint trap routines. */
1903 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1904 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1905 /* Avoid trapping reads of pages with a write breakpoint. */
1906 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1907 iotlb = phys_section_watch + paddr;
1908 *address |= TLB_MMIO;
1909 break;
1910 }
1911 }
1912 }
1913
1914 return iotlb;
1915}
1916
0124311e 1917#else
edf8e2af
MW
1918/*
1919 * Walks guest process memory "regions" one by one
1920 * and calls callback function 'fn' for each region.
1921 */
5cd2c5b6
RH
1922
1923struct walk_memory_regions_data
1924{
1925 walk_memory_regions_fn fn;
1926 void *priv;
8efe0ca8 1927 uintptr_t start;
5cd2c5b6
RH
1928 int prot;
1929};
1930
1931static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 1932 abi_ulong end, int new_prot)
5cd2c5b6
RH
1933{
1934 if (data->start != -1ul) {
1935 int rc = data->fn(data->priv, data->start, end, data->prot);
1936 if (rc != 0) {
1937 return rc;
1938 }
1939 }
1940
1941 data->start = (new_prot ? end : -1ul);
1942 data->prot = new_prot;
1943
1944 return 0;
1945}
1946
1947static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 1948 abi_ulong base, int level, void **lp)
5cd2c5b6 1949{
b480d9b7 1950 abi_ulong pa;
5cd2c5b6
RH
1951 int i, rc;
1952
1953 if (*lp == NULL) {
1954 return walk_memory_regions_end(data, base, 0);
1955 }
1956
1957 if (level == 0) {
1958 PageDesc *pd = *lp;
7296abac 1959 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1960 int prot = pd[i].flags;
1961
1962 pa = base | (i << TARGET_PAGE_BITS);
1963 if (prot != data->prot) {
1964 rc = walk_memory_regions_end(data, pa, prot);
1965 if (rc != 0) {
1966 return rc;
9fa3e853 1967 }
9fa3e853 1968 }
5cd2c5b6
RH
1969 }
1970 } else {
1971 void **pp = *lp;
7296abac 1972 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
1973 pa = base | ((abi_ulong)i <<
1974 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
1975 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1976 if (rc != 0) {
1977 return rc;
1978 }
1979 }
1980 }
1981
1982 return 0;
1983}
1984
1985int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1986{
1987 struct walk_memory_regions_data data;
8efe0ca8 1988 uintptr_t i;
5cd2c5b6
RH
1989
1990 data.fn = fn;
1991 data.priv = priv;
1992 data.start = -1ul;
1993 data.prot = 0;
1994
1995 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 1996 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
1997 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1998 if (rc != 0) {
1999 return rc;
9fa3e853 2000 }
33417e70 2001 }
5cd2c5b6
RH
2002
2003 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2004}
2005
b480d9b7
PB
2006static int dump_region(void *priv, abi_ulong start,
2007 abi_ulong end, unsigned long prot)
edf8e2af
MW
2008{
2009 FILE *f = (FILE *)priv;
2010
b480d9b7
PB
2011 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2012 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2013 start, end, end - start,
2014 ((prot & PAGE_READ) ? 'r' : '-'),
2015 ((prot & PAGE_WRITE) ? 'w' : '-'),
2016 ((prot & PAGE_EXEC) ? 'x' : '-'));
2017
2018 return (0);
2019}
2020
2021/* dump memory mappings */
2022void page_dump(FILE *f)
2023{
2024 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2025 "start", "end", "size", "prot");
2026 walk_memory_regions(f, dump_region);
33417e70
FB
2027}
2028
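/* Editor's sketch: a custom walker callback, mirroring dump_region()
 * above; returning non-zero from the callback aborts the walk. */
#if 0
static int count_exec_pages(void *priv, abi_ulong start,
                            abi_ulong end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(abi_ulong *)priv += (end - start) >> TARGET_PAGE_BITS;
    }
    return 0;
}
/* usage: abi_ulong n = 0; walk_memory_regions(&n, count_exec_pages); */
#endif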
53a5960a 2029int page_get_flags(target_ulong address)
33417e70 2030{
9fa3e853
FB
2031 PageDesc *p;
2032
2033 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2034 if (!p)
9fa3e853
FB
2035 return 0;
2036 return p->flags;
2037}
2038
376a7909
RH
2039/* Modify the flags of a page and invalidate the code if necessary.
2040 The flag PAGE_WRITE_ORG is positioned automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2042void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2043{
376a7909
RH
2044 target_ulong addr, len;
2045
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
b480d9b7
PB
2049#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2051#endif
2052 assert(start < end);
9fa3e853
FB
2053
2054 start = start & TARGET_PAGE_MASK;
2055 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2056
2057 if (flags & PAGE_WRITE) {
9fa3e853 2058 flags |= PAGE_WRITE_ORG;
376a7909
RH
2059 }
2060
2061 for (addr = start, len = end - start;
2062 len != 0;
2063 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2064 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2065
2066 /* If the write protection bit is set, then we invalidate
2067 the code inside. */
5fafdf24 2068 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2069 (flags & PAGE_WRITE) &&
2070 p->first_tb) {
d720b93d 2071 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2072 }
2073 p->flags = flags;
2074 }
33417e70
FB
2075}
2076
3d97b40b
TS
2077int page_check_range(target_ulong start, target_ulong len, int flags)
2078{
2079 PageDesc *p;
2080 target_ulong end;
2081 target_ulong addr;
2082
376a7909
RH
2083 /* This function should never be called with addresses outside the
2084 guest address space. If this assert fires, it probably indicates
2085 a missing call to h2g_valid. */
338e9e6c
BS
2086#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2088#endif
2089
3e0650a9
RH
2090 if (len == 0) {
2091 return 0;
2092 }
376a7909
RH
2093 if (start + len - 1 < start) {
2094 /* We've wrapped around. */
55f280c9 2095 return -1;
376a7909 2096 }
55f280c9 2097
3d97b40b
TS
2098 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2099 start = start & TARGET_PAGE_MASK;
2100
376a7909
RH
2101 for (addr = start, len = end - start;
2102 len != 0;
2103 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2104 p = page_find(addr >> TARGET_PAGE_BITS);
2105 if (!p)
2106 return -1;
2107 if (!(p->flags & PAGE_VALID))
2108 return -1;
2109
dae3270c 2110 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2111 return -1;
dae3270c
FB
2112 if (flags & PAGE_WRITE) {
2113 if (!(p->flags & PAGE_WRITE_ORG))
2114 return -1;
2115 /* unprotect the page if it was put read-only because it
2116 contains translated code */
2117 if (!(p->flags & PAGE_WRITE)) {
2118 if (!page_unprotect(addr, 0, NULL))
2119 return -1;
2120 }
2121 return 0;
2122 }
3d97b40b
TS
2123 }
2124 return 0;
2125}
2126
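/* Editor's sketch: the typical guard in a linux-user syscall handler -
 * validate the whole guest buffer up front and fail with the guest's
 * EFAULT (TARGET_EFAULT) before touching any page. */
#if 0
if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
    return -TARGET_EFAULT;
}
#endif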
9fa3e853 2127/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2128 page. Return TRUE if the fault was successfully handled. */
6375e09e 2129int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2130{
45d679d6
AJ
2131 unsigned int prot;
2132 PageDesc *p;
53a5960a 2133 target_ulong host_start, host_end, addr;
9fa3e853 2134
c8a706fe
PB
2135 /* Technically this isn't safe inside a signal handler. However we
2136 know this only ever happens in a synchronous SEGV handler, so in
2137 practice it seems to be ok. */
2138 mmap_lock();
2139
45d679d6
AJ
2140 p = page_find(address >> TARGET_PAGE_BITS);
2141 if (!p) {
c8a706fe 2142 mmap_unlock();
9fa3e853 2143 return 0;
c8a706fe 2144 }
45d679d6 2145
9fa3e853
FB
2146 /* if the page was really writable, then we change its
2147 protection back to writable */
45d679d6
AJ
2148 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2149 host_start = address & qemu_host_page_mask;
2150 host_end = host_start + qemu_host_page_size;
2151
2152 prot = 0;
2153 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2154 p = page_find(addr >> TARGET_PAGE_BITS);
2155 p->flags |= PAGE_WRITE;
2156 prot |= p->flags;
2157
9fa3e853
FB
2158 /* and since the content will be modified, we must invalidate
2159 the corresponding translated code. */
45d679d6 2160 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2161#ifdef DEBUG_TB_CHECK
45d679d6 2162 tb_invalidate_check(addr);
9fa3e853 2163#endif
9fa3e853 2164 }
45d679d6
AJ
2165 mprotect((void *)g2h(host_start), qemu_host_page_size,
2166 prot & PAGE_BITS);
2167
2168 mmap_unlock();
2169 return 1;
9fa3e853 2170 }
c8a706fe 2171 mmap_unlock();
9fa3e853
FB
2172 return 0;
2173}
9fa3e853
FB
2174#endif /* defined(CONFIG_USER_ONLY) */
2175
e2eef170 2176#if !defined(CONFIG_USER_ONLY)
8da3ff18 2177
c04b2b78
PB
2178#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2179typedef struct subpage_t {
70c68e44 2180 MemoryRegion iomem;
c04b2b78 2181 target_phys_addr_t base;
5312bd8b 2182 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2183} subpage_t;
2184
c227f099 2185static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2186 uint16_t section);
0f0cb164 2187static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2188static void destroy_page_desc(uint16_t section_index)
54688b1e 2189{
5312bd8b
AK
2190 MemoryRegionSection *section = &phys_sections[section_index];
2191 MemoryRegion *mr = section->mr;
54688b1e
AK
2192
2193 if (mr->subpage) {
2194 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2195 memory_region_destroy(&subpage->iomem);
2196 g_free(subpage);
2197 }
2198}
2199
4346ae3e 2200static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2201{
2202 unsigned i;
d6f2ea22 2203 PhysPageEntry *p;
54688b1e 2204
c19e8800 2205 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2206 return;
2207 }
2208
c19e8800 2209 p = phys_map_nodes[lp->ptr];
4346ae3e 2210 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2211 if (!p[i].is_leaf) {
54688b1e 2212 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2213 } else {
c19e8800 2214 destroy_page_desc(p[i].ptr);
54688b1e 2215 }
54688b1e 2216 }
07f07b31 2217 lp->is_leaf = 0;
c19e8800 2218 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2219}
2220
2221static void destroy_all_mappings(void)
2222{
3eef53df 2223 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2224 phys_map_nodes_reset();
54688b1e
AK
2225}
2226
5312bd8b
AK
2227static uint16_t phys_section_add(MemoryRegionSection *section)
2228{
2229 if (phys_sections_nb == phys_sections_nb_alloc) {
2230 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2231 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2232 phys_sections_nb_alloc);
2233 }
2234 phys_sections[phys_sections_nb] = *section;
2235 return phys_sections_nb++;
2236}
2237
2238static void phys_sections_clear(void)
2239{
2240 phys_sections_nb = 0;
2241}
2242
8f2498f9
MT
2243/* register physical memory.
2244 For RAM, 'size' must be a multiple of the target page size.
2245 Sections that do not start or end on a page boundary are routed
2246 through subpages (see register_subpage below); fully aligned pages
2247 are registered directly. The address passed to an IO callback is
2248 the offset of the access from the start of the section's
2249 MemoryRegion, so devices need not decode full physical addresses.
2250 */
0f0cb164
AK
2251static void register_subpage(MemoryRegionSection *section)
2252{
2253 subpage_t *subpage;
2254 target_phys_addr_t base = section->offset_within_address_space
2255 & TARGET_PAGE_MASK;
f3705d53 2256 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2257 MemoryRegionSection subsection = {
2258 .offset_within_address_space = base,
2259 .size = TARGET_PAGE_SIZE,
2260 };
0f0cb164
AK
2261 target_phys_addr_t start, end;
2262
f3705d53 2263 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2264
f3705d53 2265 if (!(existing->mr->subpage)) {
0f0cb164
AK
2266 subpage = subpage_init(base);
2267 subsection.mr = &subpage->iomem;
2999097b
AK
2268 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2269 phys_section_add(&subsection));
0f0cb164 2270 } else {
f3705d53 2271 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2272 }
2273 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2274 end = start + section->size;
2275 subpage_register(subpage, start, end, phys_section_add(section));
2276}
2277
2278
2279static void register_multipage(MemoryRegionSection *section)
33417e70 2280{
dd81124b
AK
2281 target_phys_addr_t start_addr = section->offset_within_address_space;
2282 ram_addr_t size = section->size;
2999097b 2283 target_phys_addr_t addr;
5312bd8b 2284 uint16_t section_index = phys_section_add(section);
dd81124b 2285
3b8e6a2d 2286 assert(size);
f6f3fbca 2287
3b8e6a2d 2288 addr = start_addr;
2999097b
AK
2289 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2290 section_index);
33417e70
FB
2291}
2292
0f0cb164
AK
2293void cpu_register_physical_memory_log(MemoryRegionSection *section,
2294 bool readonly)
2295{
2296 MemoryRegionSection now = *section, remain = *section;
2297
2298 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2299 || (now.size < TARGET_PAGE_SIZE)) {
2300 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2301 - now.offset_within_address_space,
2302 now.size);
2303 register_subpage(&now);
2304 remain.size -= now.size;
2305 remain.offset_within_address_space += now.size;
2306 remain.offset_within_region += now.size;
2307 }
2308 now = remain;
2309 now.size &= TARGET_PAGE_MASK;
2310 if (now.size) {
2311 register_multipage(&now);
2312 remain.size -= now.size;
2313 remain.offset_within_address_space += now.size;
2314 remain.offset_within_region += now.size;
2315 }
2316 now = remain;
2317 if (now.size) {
2318 register_subpage(&now);
2319 }
2320}
2321
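/* Editor's worked example for the splitting above, assuming 4 KiB target
 * pages: a section at address-space offset 0x1800 with size 0x2000 is
 * registered as
 *   [0x1800, 0x2000)  unaligned head -> register_subpage()
 *   [0x2000, 0x3000)  aligned middle -> register_multipage()
 *   [0x3000, 0x3800)  unaligned tail -> register_subpage()
 */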
2322
c227f099 2323void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2324{
2325 if (kvm_enabled())
2326 kvm_coalesce_mmio_region(addr, size);
2327}
2328
c227f099 2329void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2330{
2331 if (kvm_enabled())
2332 kvm_uncoalesce_mmio_region(addr, size);
2333}
2334
62a2744c
SY
2335void qemu_flush_coalesced_mmio_buffer(void)
2336{
2337 if (kvm_enabled())
2338 kvm_flush_coalesced_mmio_buffer();
2339}
2340
c902760f
MT
2341#if defined(__linux__) && !defined(TARGET_S390X)
2342
2343#include <sys/vfs.h>
2344
2345#define HUGETLBFS_MAGIC 0x958458f6
2346
2347static long gethugepagesize(const char *path)
2348{
2349 struct statfs fs;
2350 int ret;
2351
2352 do {
9742bf26 2353 ret = statfs(path, &fs);
c902760f
MT
2354 } while (ret != 0 && errno == EINTR);
2355
2356 if (ret != 0) {
9742bf26
YT
2357 perror(path);
2358 return 0;
c902760f
MT
2359 }
2360
2361 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2362 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2363
2364 return fs.f_bsize;
2365}
2366
04b16653
AW
2367static void *file_ram_alloc(RAMBlock *block,
2368 ram_addr_t memory,
2369 const char *path)
c902760f
MT
2370{
2371 char *filename;
2372 void *area;
2373 int fd;
2374#ifdef MAP_POPULATE
2375 int flags;
2376#endif
2377 unsigned long hpagesize;
2378
2379 hpagesize = gethugepagesize(path);
2380 if (!hpagesize) {
9742bf26 2381 return NULL;
c902760f
MT
2382 }
2383
2384 if (memory < hpagesize) {
2385 return NULL;
2386 }
2387
2388 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2389 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2390 return NULL;
2391 }
2392
2393 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2394 return NULL;
c902760f
MT
2395 }
2396
2397 fd = mkstemp(filename);
2398 if (fd < 0) {
9742bf26
YT
2399 perror("unable to create backing store for hugepages");
2400 free(filename);
2401 return NULL;
c902760f
MT
2402 }
2403 unlink(filename);
2404 free(filename);
2405
2406 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2407
2408 /*
2409 * ftruncate is not supported by hugetlbfs in older
2410 * hosts, so don't bother bailing out on errors.
2411 * If anything goes wrong with it under other filesystems,
2412 * mmap will fail.
2413 */
2414 if (ftruncate(fd, memory))
9742bf26 2415 perror("ftruncate");
c902760f
MT
2416
2417#ifdef MAP_POPULATE
2418 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2419 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2420 * to sidestep this quirk.
2421 */
2422 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2423 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2424#else
2425 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2426#endif
2427 if (area == MAP_FAILED) {
9742bf26
YT
2428 perror("file_ram_alloc: can't mmap RAM pages");
2429 close(fd);
2430 return (NULL);
c902760f 2431 }
04b16653 2432 block->fd = fd;
c902760f
MT
2433 return area;
2434}
2435#endif
2436
d17b5288 2437static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2438{
2439 RAMBlock *block, *next_block;
3e837b2c 2440 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2441
2442 if (QLIST_EMPTY(&ram_list.blocks))
2443 return 0;
2444
2445 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2446 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2447
2448 end = block->offset + block->length;
2449
2450 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2451 if (next_block->offset >= end) {
2452 next = MIN(next, next_block->offset);
2453 }
2454 }
2455 if (next - end >= size && next - end < mingap) {
3e837b2c 2456 offset = end;
04b16653
AW
2457 mingap = next - end;
2458 }
2459 }
3e837b2c
AW
2460
2461 if (offset == RAM_ADDR_MAX) {
2462 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2463 (uint64_t)size);
2464 abort();
2465 }
2466
04b16653
AW
2467 return offset;
2468}
2469
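/* Editor's worked example for find_ram_offset(): with blocks [0, 1M) and
 * [3M, 4M) already present, a 1M request sees candidate gaps [1M, 3M)
 * (2M wide) and [4M, ...). Both fit, and the 2M gap is the smaller one,
 * so the block is placed at offset 1M - best fit keeps offsets compact. */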
2470static ram_addr_t last_ram_offset(void)
d17b5288
AW
2471{
2472 RAMBlock *block;
2473 ram_addr_t last = 0;
2474
2475 QLIST_FOREACH(block, &ram_list.blocks, next)
2476 last = MAX(last, block->offset + block->length);
2477
2478 return last;
2479}
2480
c5705a77 2481void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2482{
2483 RAMBlock *new_block, *block;
2484
c5705a77
AK
2485 new_block = NULL;
2486 QLIST_FOREACH(block, &ram_list.blocks, next) {
2487 if (block->offset == addr) {
2488 new_block = block;
2489 break;
2490 }
2491 }
2492 assert(new_block);
2493 assert(!new_block->idstr[0]);
84b89d78 2494
09e5ab63
AL
2495 if (dev) {
2496 char *id = qdev_get_dev_path(dev);
84b89d78
CM
2497 if (id) {
2498 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2499 g_free(id);
84b89d78
CM
2500 }
2501 }
2502 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2503
2504 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2505 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2506 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2507 new_block->idstr);
2508 abort();
2509 }
2510 }
c5705a77
AK
2511}
2512
2513ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2514 MemoryRegion *mr)
2515{
2516 RAMBlock *new_block;
2517
2518 size = TARGET_PAGE_ALIGN(size);
2519 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2520
7c637366 2521 new_block->mr = mr;
432d268c 2522 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2523 if (host) {
2524 new_block->host = host;
cd19cfa2 2525 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2526 } else {
2527 if (mem_path) {
c902760f 2528#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2529 new_block->host = file_ram_alloc(new_block, size, mem_path);
2530 if (!new_block->host) {
2531 new_block->host = qemu_vmalloc(size);
e78815a5 2532 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2533 }
c902760f 2534#else
6977dfe6
YT
2535 fprintf(stderr, "-mem-path option unsupported\n");
2536 exit(1);
c902760f 2537#endif
6977dfe6 2538 } else {
6b02494d 2539#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2540 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2541 a system-defined value, which is at least 256GB. Larger systems
2542 have larger values. We put the guest between the end of data
2543 segment (system break) and this value. We use 32GB as a base to
2544 have enough room for the system break to grow. */
2545 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2546 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2547 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2548 if (new_block->host == MAP_FAILED) {
2549 fprintf(stderr, "Allocating RAM failed\n");
2550 abort();
2551 }
6b02494d 2552#else
868bb33f 2553 if (xen_enabled()) {
fce537d4 2554 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2555 } else {
2556 new_block->host = qemu_vmalloc(size);
2557 }
6b02494d 2558#endif
e78815a5 2559 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2560 }
c902760f 2561 }
94a6b54f
PB
2562 new_block->length = size;
2563
f471a17e 2564 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2565
7267c094 2566 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2567 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2568 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2569 0xff, size >> TARGET_PAGE_BITS);
2570
6f0437e8
JK
2571 if (kvm_enabled())
2572 kvm_setup_guest_memory(new_block->host, size);
2573
94a6b54f
PB
2574 return new_block->offset;
2575}
e9a1ab19 2576
c5705a77 2577ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2578{
c5705a77 2579 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2580}
2581
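/* Editor's usage sketch: device models normally reach this through the
 * MemoryRegion API (memory_region_init_ram() ends up here); the direct
 * form would look like this, with 'vga_mem' a hypothetical MemoryRegion. */
#if 0
ram_addr_t vram_offset = qemu_ram_alloc(8 * 1024 * 1024, &vga_mem);
uint8_t *vram = qemu_get_ram_ptr(vram_offset);
#endif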
1f2e98b6
AW
2582void qemu_ram_free_from_ptr(ram_addr_t addr)
2583{
2584 RAMBlock *block;
2585
2586 QLIST_FOREACH(block, &ram_list.blocks, next) {
2587 if (addr == block->offset) {
2588 QLIST_REMOVE(block, next);
7267c094 2589 g_free(block);
1f2e98b6
AW
2590 return;
2591 }
2592 }
2593}
2594
c227f099 2595void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2596{
04b16653
AW
2597 RAMBlock *block;
2598
2599 QLIST_FOREACH(block, &ram_list.blocks, next) {
2600 if (addr == block->offset) {
2601 QLIST_REMOVE(block, next);
cd19cfa2
HY
2602 if (block->flags & RAM_PREALLOC_MASK) {
2603 ;
2604 } else if (mem_path) {
04b16653
AW
2605#if defined (__linux__) && !defined(TARGET_S390X)
2606 if (block->fd) {
2607 munmap(block->host, block->length);
2608 close(block->fd);
2609 } else {
2610 qemu_vfree(block->host);
2611 }
fd28aa13
JK
2612#else
2613 abort();
04b16653
AW
2614#endif
2615 } else {
2616#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2617 munmap(block->host, block->length);
2618#else
868bb33f 2619 if (xen_enabled()) {
e41d7c69 2620 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2621 } else {
2622 qemu_vfree(block->host);
2623 }
04b16653
AW
2624#endif
2625 }
7267c094 2626 g_free(block);
04b16653
AW
2627 return;
2628 }
2629 }
2630
e9a1ab19
FB
2631}
2632
cd19cfa2
HY
2633#ifndef _WIN32
2634void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2635{
2636 RAMBlock *block;
2637 ram_addr_t offset;
2638 int flags;
2639 void *area, *vaddr;
2640
2641 QLIST_FOREACH(block, &ram_list.blocks, next) {
2642 offset = addr - block->offset;
2643 if (offset < block->length) {
2644 vaddr = block->host + offset;
2645 if (block->flags & RAM_PREALLOC_MASK) {
2646 ;
2647 } else {
2648 flags = MAP_FIXED;
2649 munmap(vaddr, length);
2650 if (mem_path) {
2651#if defined(__linux__) && !defined(TARGET_S390X)
2652 if (block->fd) {
2653#ifdef MAP_POPULATE
2654 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2655 MAP_PRIVATE;
2656#else
2657 flags |= MAP_PRIVATE;
2658#endif
2659 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2660 flags, block->fd, offset);
2661 } else {
2662 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2663 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2664 flags, -1, 0);
2665 }
fd28aa13
JK
2666#else
2667 abort();
cd19cfa2
HY
2668#endif
2669 } else {
2670#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2671 flags |= MAP_SHARED | MAP_ANONYMOUS;
2672 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2673 flags, -1, 0);
2674#else
2675 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2676 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2677 flags, -1, 0);
2678#endif
2679 }
2680 if (area != vaddr) {
f15fbc4b
AP
2681 fprintf(stderr, "Could not remap addr: "
2682 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2683 length, addr);
2684 exit(1);
2685 }
2686 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2687 }
2688 return;
2689 }
2690 }
2691}
2692#endif /* !_WIN32 */
2693
dc828ca1 2694/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2695 With the exception of the softmmu code in this file, this should
2696 only be used for local memory (e.g. video ram) that the device owns,
2697 and knows it isn't going to access beyond the end of the block.
2698
2699 It should not be used for general purpose DMA.
2700 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2701 */
c227f099 2702void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2703{
94a6b54f
PB
2704 RAMBlock *block;
2705
f471a17e
AW
2706 QLIST_FOREACH(block, &ram_list.blocks, next) {
2707 if (addr - block->offset < block->length) {
7d82af38
VP
2708 /* Move this entry to the start of the list. */
2709 if (block != QLIST_FIRST(&ram_list.blocks)) {
2710 QLIST_REMOVE(block, next);
2711 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2712 }
868bb33f 2713 if (xen_enabled()) {
432d268c
JN
2714 /* We need to check whether the requested address is in RAM
2715 * because we don't want to map the entire memory in QEMU.
712c2b41 2716 * In that case just map until the end of the page.
432d268c
JN
2717 */
2718 if (block->offset == 0) {
e41d7c69 2719 return xen_map_cache(addr, 0, 0);
432d268c 2720 } else if (block->host == NULL) {
e41d7c69
JK
2721 block->host =
2722 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2723 }
2724 }
f471a17e
AW
2725 return block->host + (addr - block->offset);
2726 }
94a6b54f 2727 }
f471a17e
AW
2728
2729 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2730 abort();
2731
2732 return NULL;
dc828ca1
PB
2733}
2734
b2e0a138
MT
2735/* Return a host pointer to ram allocated with qemu_ram_alloc.
2736 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2737 */
2738void *qemu_safe_ram_ptr(ram_addr_t addr)
2739{
2740 RAMBlock *block;
2741
2742 QLIST_FOREACH(block, &ram_list.blocks, next) {
2743 if (addr - block->offset < block->length) {
868bb33f 2744 if (xen_enabled()) {
432d268c
JN
2745 /* We need to check whether the requested address is in RAM
2746 * because we don't want to map the entire memory in QEMU.
712c2b41 2747 * In that case just map until the end of the page.
432d268c
JN
2748 */
2749 if (block->offset == 0) {
e41d7c69 2750 return xen_map_cache(addr, 0, 0);
432d268c 2751 } else if (block->host == NULL) {
e41d7c69
JK
2752 block->host =
2753 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2754 }
2755 }
b2e0a138
MT
2756 return block->host + (addr - block->offset);
2757 }
2758 }
2759
2760 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2761 abort();
2762
2763 return NULL;
2764}
2765
38bee5dc
SS
2766/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2767 * but takes a size argument */
8ab934f9 2768void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2769{
8ab934f9
SS
2770 if (*size == 0) {
2771 return NULL;
2772 }
868bb33f 2773 if (xen_enabled()) {
e41d7c69 2774 return xen_map_cache(addr, *size, 1);
868bb33f 2775 } else {
38bee5dc
SS
2776 RAMBlock *block;
2777
2778 QLIST_FOREACH(block, &ram_list.blocks, next) {
2779 if (addr - block->offset < block->length) {
2780 if (addr - block->offset + *size > block->length)
2781 *size = block->length - addr + block->offset;
2782 return block->host + (addr - block->offset);
2783 }
2784 }
2785
2786 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2787 abort();
38bee5dc
SS
2788 }
2789}
2790
050a0ddf
AP
2791void qemu_put_ram_ptr(void *addr)
2792{
2793 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2794}
2795
e890261f 2796int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2797{
94a6b54f
PB
2798 RAMBlock *block;
2799 uint8_t *host = ptr;
2800
868bb33f 2801 if (xen_enabled()) {
e41d7c69 2802 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2803 return 0;
2804 }
2805
f471a17e 2806 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
2807 /* This can happen when the block is not mapped. */
2808 if (block->host == NULL) {
2809 continue;
2810 }
f471a17e 2811 if (host - block->host < block->length) {
e890261f
MT
2812 *ram_addr = block->offset + (host - block->host);
2813 return 0;
f471a17e 2814 }
94a6b54f 2815 }
432d268c 2816
e890261f
MT
2817 return -1;
2818}
f471a17e 2819
e890261f
MT
2820/* Some of the softmmu routines need to translate from a host pointer
2821 (typically a TLB entry) back to a ram offset. */
2822ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2823{
2824 ram_addr_t ram_addr;
f471a17e 2825
e890261f
MT
2826 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2827 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2828 abort();
2829 }
2830 return ram_addr;
5579c7f3
PB
2831}
2832
0e0df1e2
AK
2833static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2834 unsigned size)
e18231a3
BS
2835{
2836#ifdef DEBUG_UNASSIGNED
2837 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2838#endif
5b450407 2839#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2840 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2841#endif
2842 return 0;
2843}
2844
0e0df1e2
AK
2845static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2846 uint64_t val, unsigned size)
e18231a3
BS
2847{
2848#ifdef DEBUG_UNASSIGNED
0e0df1e2 2849 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2850#endif
5b450407 2851#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2852 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2853#endif
33417e70
FB
2854}
2855
0e0df1e2
AK
2856static const MemoryRegionOps unassigned_mem_ops = {
2857 .read = unassigned_mem_read,
2858 .write = unassigned_mem_write,
2859 .endianness = DEVICE_NATIVE_ENDIAN,
2860};
e18231a3 2861
0e0df1e2
AK
2862static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2863 unsigned size)
e18231a3 2864{
0e0df1e2 2865 abort();
e18231a3
BS
2866}
2867
0e0df1e2
AK
2868static void error_mem_write(void *opaque, target_phys_addr_t addr,
2869 uint64_t value, unsigned size)
e18231a3 2870{
0e0df1e2 2871 abort();
33417e70
FB
2872}
2873
0e0df1e2
AK
2874static const MemoryRegionOps error_mem_ops = {
2875 .read = error_mem_read,
2876 .write = error_mem_write,
2877 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2878};
2879
0e0df1e2
AK
2880static const MemoryRegionOps rom_mem_ops = {
2881 .read = error_mem_read,
2882 .write = unassigned_mem_write,
2883 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2884};
2885
0e0df1e2
AK
2886static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2887 uint64_t val, unsigned size)
9fa3e853 2888{
3a7d929e 2889 int dirty_flags;
f7c11b53 2890 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2891 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2892#if !defined(CONFIG_USER_ONLY)
0e0df1e2 2893 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 2894 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2895#endif
3a7d929e 2896 }
0e0df1e2
AK
2897 switch (size) {
2898 case 1:
2899 stb_p(qemu_get_ram_ptr(ram_addr), val);
2900 break;
2901 case 2:
2902 stw_p(qemu_get_ram_ptr(ram_addr), val);
2903 break;
2904 case 4:
2905 stl_p(qemu_get_ram_ptr(ram_addr), val);
2906 break;
2907 default:
2908 abort();
3a7d929e 2909 }
f23db169 2910 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2911 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2912 /* we remove the notdirty callback only if the code has been
2913 flushed */
2914 if (dirty_flags == 0xff)
2e70f6ef 2915 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2916}
2917
0e0df1e2
AK
2918static const MemoryRegionOps notdirty_mem_ops = {
2919 .read = error_mem_read,
2920 .write = notdirty_mem_write,
2921 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
2922};
2923
0f459d16 2924/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2925static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 2926{
9349b4f9 2927 CPUArchState *env = cpu_single_env;
06d55cc1
AL
2928 target_ulong pc, cs_base;
2929 TranslationBlock *tb;
0f459d16 2930 target_ulong vaddr;
a1d1bb31 2931 CPUWatchpoint *wp;
06d55cc1 2932 int cpu_flags;
0f459d16 2933
06d55cc1
AL
2934 if (env->watchpoint_hit) {
2935 /* We re-entered the check after replacing the TB. Now raise
2936 * the debug interrupt so that it will trigger after the
2937 * current instruction. */
2938 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2939 return;
2940 }
2e70f6ef 2941 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2942 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2943 if ((vaddr == (wp->vaddr & len_mask) ||
2944 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2945 wp->flags |= BP_WATCHPOINT_HIT;
2946 if (!env->watchpoint_hit) {
2947 env->watchpoint_hit = wp;
2948 tb = tb_find_pc(env->mem_io_pc);
2949 if (!tb) {
2950 cpu_abort(env, "check_watchpoint: could not find TB for "
2951 "pc=%p", (void *)env->mem_io_pc);
2952 }
618ba8e6 2953 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
2954 tb_phys_invalidate(tb, -1);
2955 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2956 env->exception_index = EXCP_DEBUG;
488d6577 2957 cpu_loop_exit(env);
6e140f28
AL
2958 } else {
2959 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2960 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 2961 cpu_resume_from_signal(env, NULL);
6e140f28 2962 }
06d55cc1 2963 }
6e140f28
AL
2964 } else {
2965 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2966 }
2967 }
2968}
2969
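/* Editor's sketch: arming a watchpoint so the routines below fire.
 * cpu_watchpoint_insert() (earlier in this file) makes the iotlb code
 * above tag the page TLB_MMIO, routing accesses through
 * watch_mem_read()/watch_mem_write(). */
#if 0
CPUWatchpoint *wp;
cpu_watchpoint_insert(env, vaddr, 4 /* len */, BP_MEM_WRITE | BP_GDB, &wp);
#endif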
6658ffb8
PB
2970/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2971 so these check for a hit then pass through to the normal out-of-line
2972 phys routines. */
1ec9b909
AK
2973static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2974 unsigned size)
6658ffb8 2975{
1ec9b909
AK
2976 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2977 switch (size) {
2978 case 1: return ldub_phys(addr);
2979 case 2: return lduw_phys(addr);
2980 case 4: return ldl_phys(addr);
2981 default: abort();
2982 }
6658ffb8
PB
2983}
2984
1ec9b909
AK
2985static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2986 uint64_t val, unsigned size)
6658ffb8 2987{
1ec9b909
AK
2988 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2989 switch (size) {
67364150
MF
2990 case 1:
2991 stb_phys(addr, val);
2992 break;
2993 case 2:
2994 stw_phys(addr, val);
2995 break;
2996 case 4:
2997 stl_phys(addr, val);
2998 break;
1ec9b909
AK
2999 default: abort();
3000 }
6658ffb8
PB
3001}
3002
1ec9b909
AK
3003static const MemoryRegionOps watch_mem_ops = {
3004 .read = watch_mem_read,
3005 .write = watch_mem_write,
3006 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3007};
6658ffb8 3008
70c68e44
AK
3009static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3010 unsigned len)
db7b5426 3011{
70c68e44 3012 subpage_t *mmio = opaque;
f6405247 3013 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3014 MemoryRegionSection *section;
db7b5426
BS
3015#if defined(DEBUG_SUBPAGE)
3016 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3017 mmio, len, addr, idx);
3018#endif
db7b5426 3019
5312bd8b
AK
3020 section = &phys_sections[mmio->sub_section[idx]];
3021 addr += mmio->base;
3022 addr -= section->offset_within_address_space;
3023 addr += section->offset_within_region;
37ec01d4 3024 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3025}
3026
70c68e44
AK
3027static void subpage_write(void *opaque, target_phys_addr_t addr,
3028 uint64_t value, unsigned len)
db7b5426 3029{
70c68e44 3030 subpage_t *mmio = opaque;
f6405247 3031 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3032 MemoryRegionSection *section;
db7b5426 3033#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3034 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3035 " idx %d value %"PRIx64"\n",
f6405247 3036 __func__, mmio, len, addr, idx, value);
db7b5426 3037#endif
f6405247 3038
5312bd8b
AK
3039 section = &phys_sections[mmio->sub_section[idx]];
3040 addr += mmio->base;
3041 addr -= section->offset_within_address_space;
3042 addr += section->offset_within_region;
37ec01d4 3043 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3044}
3045
70c68e44
AK
3046static const MemoryRegionOps subpage_ops = {
3047 .read = subpage_read,
3048 .write = subpage_write,
3049 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3050};
3051
de712f94
AK
3052static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3053 unsigned size)
56384e8b
AF
3054{
3055 ram_addr_t raddr = addr;
3056 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3057 switch (size) {
3058 case 1: return ldub_p(ptr);
3059 case 2: return lduw_p(ptr);
3060 case 4: return ldl_p(ptr);
3061 default: abort();
3062 }
56384e8b
AF
3063}
3064
de712f94
AK
3065static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3066 uint64_t value, unsigned size)
56384e8b
AF
3067{
3068 ram_addr_t raddr = addr;
3069 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3070 switch (size) {
3071 case 1: return stb_p(ptr, value);
3072 case 2: return stw_p(ptr, value);
3073 case 4: return stl_p(ptr, value);
3074 default: abort();
3075 }
56384e8b
AF
3076}
3077
de712f94
AK
3078static const MemoryRegionOps subpage_ram_ops = {
3079 .read = subpage_ram_read,
3080 .write = subpage_ram_write,
3081 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3082};
3083
c227f099 3084static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3085 uint16_t section)
db7b5426
BS
3086{
3087 int idx, eidx;
3088
3089 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3090 return -1;
3091 idx = SUBPAGE_IDX(start);
3092 eidx = SUBPAGE_IDX(end);
3093#if defined(DEBUG_SUBPAGE)
0bf9e31a 3094 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3095 mmio, start, end, idx, eidx, section);
3096#endif
5312bd8b
AK
3097 if (memory_region_is_ram(phys_sections[section].mr)) {
3098 MemoryRegionSection new_section = phys_sections[section];
3099 new_section.mr = &io_mem_subpage_ram;
3100 section = phys_section_add(&new_section);
56384e8b 3101 }
db7b5426 3102 for (; idx <= eidx; idx++) {
5312bd8b 3103 mmio->sub_section[idx] = section;
db7b5426
BS
3104 }
3105
3106 return 0;
3107}
3108
0f0cb164 3109static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3110{
c227f099 3111 subpage_t *mmio;
db7b5426 3112
7267c094 3113 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3114
3115 mmio->base = base;
70c68e44
AK
3116 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3117 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3118 mmio->iomem.subpage = true;
db7b5426 3119#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3120 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3121 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3122#endif
0f0cb164 3123 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3124
3125 return mmio;
3126}
3127
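/* Editor's worked example: SUBPAGE_IDX() is just the byte offset within
 * the page, so with 4 KiB pages an access at guest-physical ...0x1234
 * selects sub_section[0x234]; subpage_read()/subpage_write() then forward
 * to that section's MemoryRegion at the recomputed region-relative
 * address. */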
5312bd8b
AK
3128static uint16_t dummy_section(MemoryRegion *mr)
3129{
3130 MemoryRegionSection section = {
3131 .mr = mr,
3132 .offset_within_address_space = 0,
3133 .offset_within_region = 0,
3134 .size = UINT64_MAX,
3135 };
3136
3137 return phys_section_add(&section);
3138}
3139
37ec01d4 3140MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3141{
37ec01d4 3142 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3143}
3144
e9179ce1
AK
3145static void io_mem_init(void)
3146{
0e0df1e2 3147 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3148 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3149 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3150 "unassigned", UINT64_MAX);
3151 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3152 "notdirty", UINT64_MAX);
de712f94
AK
3153 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3154 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3155 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3156 "watch", UINT64_MAX);
e9179ce1
AK
3157}
3158
50c1e149
AK
3159static void core_begin(MemoryListener *listener)
3160{
54688b1e 3161 destroy_all_mappings();
5312bd8b 3162 phys_sections_clear();
c19e8800 3163 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3164 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3165 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3166 phys_section_rom = dummy_section(&io_mem_rom);
3167 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3168}
3169
3170static void core_commit(MemoryListener *listener)
3171{
9349b4f9 3172 CPUArchState *env;
117712c3
AK
3173
3174 /* since each CPU stores ram addresses in its TLB cache, we must
3175 reset the modified entries */
3176 /* XXX: slow ! */
3177 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3178 tlb_flush(env, 1);
3179 }
50c1e149
AK
3180}
3181
93632747
AK
3182static void core_region_add(MemoryListener *listener,
3183 MemoryRegionSection *section)
3184{
4855d41a 3185 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3186}
3187
3188static void core_region_del(MemoryListener *listener,
3189 MemoryRegionSection *section)
3190{
93632747
AK
3191}
3192
50c1e149
AK
3193static void core_region_nop(MemoryListener *listener,
3194 MemoryRegionSection *section)
3195{
54688b1e 3196 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3197}
3198
93632747
AK
3199static void core_log_start(MemoryListener *listener,
3200 MemoryRegionSection *section)
3201{
3202}
3203
3204static void core_log_stop(MemoryListener *listener,
3205 MemoryRegionSection *section)
3206{
3207}
3208
3209static void core_log_sync(MemoryListener *listener,
3210 MemoryRegionSection *section)
3211{
3212}
3213
3214static void core_log_global_start(MemoryListener *listener)
3215{
3216 cpu_physical_memory_set_dirty_tracking(1);
3217}
3218
3219static void core_log_global_stop(MemoryListener *listener)
3220{
3221 cpu_physical_memory_set_dirty_tracking(0);
3222}
3223
3224static void core_eventfd_add(MemoryListener *listener,
3225 MemoryRegionSection *section,
3226 bool match_data, uint64_t data, int fd)
3227{
3228}
3229
3230static void core_eventfd_del(MemoryListener *listener,
3231 MemoryRegionSection *section,
3232 bool match_data, uint64_t data, int fd)
3233{
3234}
3235
50c1e149
AK
3236static void io_begin(MemoryListener *listener)
3237{
3238}
3239
3240static void io_commit(MemoryListener *listener)
3241{
3242}
3243
4855d41a
AK
3244static void io_region_add(MemoryListener *listener,
3245 MemoryRegionSection *section)
3246{
a2d33521
AK
3247 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3248
3249 mrio->mr = section->mr;
3250 mrio->offset = section->offset_within_region;
3251 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3252 section->offset_within_address_space, section->size);
a2d33521 3253 ioport_register(&mrio->iorange);
4855d41a
AK
3254}
3255
3256static void io_region_del(MemoryListener *listener,
3257 MemoryRegionSection *section)
3258{
3259 isa_unassign_ioport(section->offset_within_address_space, section->size);
3260}
3261
50c1e149
AK
3262static void io_region_nop(MemoryListener *listener,
3263 MemoryRegionSection *section)
3264{
3265}
3266
4855d41a
AK
3267static void io_log_start(MemoryListener *listener,
3268 MemoryRegionSection *section)
3269{
3270}
3271
3272static void io_log_stop(MemoryListener *listener,
3273 MemoryRegionSection *section)
3274{
3275}
3276
3277static void io_log_sync(MemoryListener *listener,
3278 MemoryRegionSection *section)
3279{
3280}
3281
3282static void io_log_global_start(MemoryListener *listener)
3283{
3284}
3285
3286static void io_log_global_stop(MemoryListener *listener)
3287{
3288}
3289
3290static void io_eventfd_add(MemoryListener *listener,
3291 MemoryRegionSection *section,
3292 bool match_data, uint64_t data, int fd)
3293{
3294}
3295
3296static void io_eventfd_del(MemoryListener *listener,
3297 MemoryRegionSection *section,
3298 bool match_data, uint64_t data, int fd)
3299{
3300}
3301
93632747 3302static MemoryListener core_memory_listener = {
50c1e149
AK
3303 .begin = core_begin,
3304 .commit = core_commit,
93632747
AK
3305 .region_add = core_region_add,
3306 .region_del = core_region_del,
50c1e149 3307 .region_nop = core_region_nop,
93632747
AK
3308 .log_start = core_log_start,
3309 .log_stop = core_log_stop,
3310 .log_sync = core_log_sync,
3311 .log_global_start = core_log_global_start,
3312 .log_global_stop = core_log_global_stop,
3313 .eventfd_add = core_eventfd_add,
3314 .eventfd_del = core_eventfd_del,
3315 .priority = 0,
3316};
3317
4855d41a 3318static MemoryListener io_memory_listener = {
50c1e149
AK
3319 .begin = io_begin,
3320 .commit = io_commit,
4855d41a
AK
3321 .region_add = io_region_add,
3322 .region_del = io_region_del,
50c1e149 3323 .region_nop = io_region_nop,
4855d41a
AK
3324 .log_start = io_log_start,
3325 .log_stop = io_log_stop,
3326 .log_sync = io_log_sync,
3327 .log_global_start = io_log_global_start,
3328 .log_global_stop = io_log_global_stop,
3329 .eventfd_add = io_eventfd_add,
3330 .eventfd_del = io_eventfd_del,
3331 .priority = 0,
3332};
3333
62152b8a
AK
3334static void memory_map_init(void)
3335{
7267c094 3336 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3337 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3338 set_system_memory_map(system_memory);
309cb471 3339
7267c094 3340 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3341 memory_region_init(system_io, "io", 65536);
3342 set_system_io_map(system_io);
93632747 3343
4855d41a
AK
3344 memory_listener_register(&core_memory_listener, system_memory);
3345 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3346}
3347
3348MemoryRegion *get_system_memory(void)
3349{
3350 return system_memory;
3351}
3352
309cb471
AK
3353MemoryRegion *get_system_io(void)
3354{
3355 return system_io;
3356}
3357
e2eef170
PB
3358#endif /* !defined(CONFIG_USER_ONLY) */
3359
13eb76e0
FB
3360/* physical memory access (slow version, mainly for debug) */
3361#if defined(CONFIG_USER_ONLY)
9349b4f9 3362int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3363 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3364{
3365 int l, flags;
3366 target_ulong page;
53a5960a 3367 void * p;
13eb76e0
FB
3368
3369 while (len > 0) {
3370 page = addr & TARGET_PAGE_MASK;
3371 l = (page + TARGET_PAGE_SIZE) - addr;
3372 if (l > len)
3373 l = len;
3374 flags = page_get_flags(page);
3375 if (!(flags & PAGE_VALID))
a68fe89c 3376 return -1;
13eb76e0
FB
3377 if (is_write) {
3378 if (!(flags & PAGE_WRITE))
a68fe89c 3379 return -1;
579a97f7 3380 /* XXX: this code should not depend on lock_user */
72fb7daa 3381 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3382 return -1;
72fb7daa
AJ
3383 memcpy(p, buf, l);
3384 unlock_user(p, addr, l);
13eb76e0
FB
3385 } else {
3386 if (!(flags & PAGE_READ))
a68fe89c 3387 return -1;
579a97f7 3388 /* XXX: this code should not depend on lock_user */
72fb7daa 3389 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3390 return -1;
72fb7daa 3391 memcpy(buf, p, l);
5b257578 3392 unlock_user(p, addr, 0);
13eb76e0
FB
3393 }
3394 len -= l;
3395 buf += l;
3396 addr += l;
3397 }
a68fe89c 3398 return 0;
13eb76e0 3399}
8df1cd07 3400
13eb76e0 3401#else
c227f099 3402void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3403 int len, int is_write)
3404{
37ec01d4 3405 int l;
13eb76e0
FB
3406 uint8_t *ptr;
3407 uint32_t val;
c227f099 3408 target_phys_addr_t page;
f3705d53 3409 MemoryRegionSection *section;
3b46e624 3410
13eb76e0
FB
3411 while (len > 0) {
3412 page = addr & TARGET_PAGE_MASK;
3413 l = (page + TARGET_PAGE_SIZE) - addr;
3414 if (l > len)
3415 l = len;
06ef3525 3416 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3417
13eb76e0 3418 if (is_write) {
f3705d53 3419 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3420 target_phys_addr_t addr1;
cc5bea60 3421 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3422 /* XXX: could force cpu_single_env to NULL to avoid
3423 potential bugs */
6c2934db 3424 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3425 /* 32 bit write access */
c27004ec 3426 val = ldl_p(buf);
37ec01d4 3427 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3428 l = 4;
6c2934db 3429 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3430 /* 16 bit write access */
c27004ec 3431 val = lduw_p(buf);
37ec01d4 3432 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3433 l = 2;
3434 } else {
1c213d19 3435 /* 8 bit write access */
c27004ec 3436 val = ldub_p(buf);
37ec01d4 3437 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3438 l = 1;
3439 }
f3705d53 3440 } else if (!section->readonly) {
8ca5692d 3441 ram_addr_t addr1;
f3705d53 3442 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3443 + memory_region_section_addr(section, addr);
13eb76e0 3444 /* RAM case */
5579c7f3 3445 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3446 memcpy(ptr, buf, l);
3a7d929e
FB
3447 if (!cpu_physical_memory_is_dirty(addr1)) {
3448 /* invalidate code */
3449 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3450 /* set dirty bit */
f7c11b53
YT
3451 cpu_physical_memory_set_dirty_flags(
3452 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3453 }
050a0ddf 3454 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3455 }
3456 } else {
cc5bea60
BS
3457 if (!(memory_region_is_ram(section->mr) ||
3458 memory_region_is_romd(section->mr))) {
f1f6e3b8 3459 target_phys_addr_t addr1;
13eb76e0 3460 /* I/O case */
cc5bea60 3461 addr1 = memory_region_section_addr(section, addr);
6c2934db 3462 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3463 /* 32 bit read access */
37ec01d4 3464 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3465 stl_p(buf, val);
13eb76e0 3466 l = 4;
6c2934db 3467 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3468 /* 16 bit read access */
37ec01d4 3469 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3470 stw_p(buf, val);
13eb76e0
FB
3471 l = 2;
3472 } else {
1c213d19 3473 /* 8 bit read access */
37ec01d4 3474 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3475 stb_p(buf, val);
13eb76e0
FB
3476 l = 1;
3477 }
3478 } else {
3479 /* RAM case */
0a1b357f 3480 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3481 + memory_region_section_addr(section,
3482 addr));
f3705d53 3483 memcpy(buf, ptr, l);
050a0ddf 3484 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3485 }
3486 }
3487 len -= l;
3488 buf += l;
3489 addr += l;
3490 }
3491}
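/* A minimal usage sketch (hypothetical device code, not part of QEMU):
   GUEST_CMD_ADDR and the 16-byte command layout are illustrative.
   cpu_physical_memory_rw() is the generic path for device models touching
   guest memory: it splits the transfer at page boundaries and dispatches
   each fragment to a RAM memcpy or to MMIO reads/writes as appropriate. */
#define GUEST_CMD_ADDR 0x10000 /* assumed guest-physical address */
static void example_process_command(void)
{
    uint8_t cmd[16];

    cpu_physical_memory_rw(GUEST_CMD_ADDR, cmd, sizeof(cmd), 0); /* read */
    cmd[0] = 1;                                    /* patch a status byte */
    cpu_physical_memory_rw(GUEST_CMD_ADDR, cmd, sizeof(cmd), 1); /* write */
}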
8df1cd07 3492
d0ecd2aa 3493/* used for ROM loading : can write in RAM and ROM */
c227f099 3494void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3495 const uint8_t *buf, int len)
3496{
3497 int l;
3498 uint8_t *ptr;
c227f099 3499 target_phys_addr_t page;
f3705d53 3500 MemoryRegionSection *section;
3b46e624 3501
d0ecd2aa
FB
3502 while (len > 0) {
3503 page = addr & TARGET_PAGE_MASK;
3504 l = (page + TARGET_PAGE_SIZE) - addr;
3505 if (l > len)
3506 l = len;
06ef3525 3507 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3508
cc5bea60
BS
3509 if (!(memory_region_is_ram(section->mr) ||
3510 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3511 /* do nothing */
3512 } else {
3513 unsigned long addr1;
f3705d53 3514 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3515 + memory_region_section_addr(section, addr);
d0ecd2aa 3516 /* ROM/RAM case */
5579c7f3 3517 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3518 memcpy(ptr, buf, l);
050a0ddf 3519 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3520 }
3521 len -= l;
3522 buf += l;
3523 addr += l;
3524 }
3525}
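/* A minimal sketch, assuming a hypothetical board-init path (BIOS_ADDR and
   example_load_firmware are illustrative names): unlike the plain rw path
   above, which silently drops stores to read-only sections, this writes
   straight into ROM/read-only RAM -- exactly what firmware loading needs. */
#define BIOS_ADDR 0xfffc0000 /* assumed flash/ROM base */
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(BIOS_ADDR, blob, size);
}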
3526
6d16c2f8
AL
3527typedef struct {
3528 void *buffer;
c227f099
AL
3529 target_phys_addr_t addr;
3530 target_phys_addr_t len;
6d16c2f8
AL
3531} BounceBuffer;
3532
3533static BounceBuffer bounce;
3534
ba223c29
AL
3535typedef struct MapClient {
3536 void *opaque;
3537 void (*callback)(void *opaque);
72cf2d4f 3538 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3539} MapClient;
3540
72cf2d4f
BS
3541static QLIST_HEAD(map_client_list, MapClient) map_client_list
3542 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3543
3544void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3545{
7267c094 3546 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3547
3548 client->opaque = opaque;
3549 client->callback = callback;
72cf2d4f 3550 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3551 return client;
3552}
3553
3554void cpu_unregister_map_client(void *_client)
3555{
3556 MapClient *client = (MapClient *)_client;
3557
72cf2d4f 3558 QLIST_REMOVE(client, link);
7267c094 3559 g_free(client);
ba223c29
AL
3560}
3561
3562static void cpu_notify_map_clients(void)
3563{
3564 MapClient *client;
3565
72cf2d4f
BS
3566 while (!QLIST_EMPTY(&map_client_list)) {
3567 client = QLIST_FIRST(&map_client_list);
ba223c29 3568 client->callback(client->opaque);
34d5e948 3569 cpu_unregister_map_client(client);
ba223c29
AL
3570 }
3571}
3572
6d16c2f8
AL
3573/* Map a physical memory region into the host's virtual address space.
3574 * May map a subset of the requested range, given by and returned in *plen.
3575 * May return NULL if resources needed to perform the mapping are exhausted.
3576 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3577 * Use cpu_register_map_client() to know when retrying the map operation is
3578 * likely to succeed.
6d16c2f8 3579 */
c227f099
AL
3580void *cpu_physical_memory_map(target_phys_addr_t addr,
3581 target_phys_addr_t *plen,
6d16c2f8
AL
3582 int is_write)
3583{
c227f099 3584 target_phys_addr_t len = *plen;
38bee5dc 3585 target_phys_addr_t todo = 0;
6d16c2f8 3586 int l;
c227f099 3587 target_phys_addr_t page;
f3705d53 3588 MemoryRegionSection *section;
f15fbc4b 3589 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3590 ram_addr_t rlen;
3591 void *ret;
6d16c2f8
AL
3592
3593 while (len > 0) {
3594 page = addr & TARGET_PAGE_MASK;
3595 l = (page + TARGET_PAGE_SIZE) - addr;
3596 if (l > len)
3597 l = len;
06ef3525 3598 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3599
f3705d53 3600 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3601 if (todo || bounce.buffer) {
6d16c2f8
AL
3602 break;
3603 }
3604 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3605 bounce.addr = addr;
3606 bounce.len = l;
3607 if (!is_write) {
54f7b4a3 3608 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3609 }
38bee5dc
SS
3610
3611 *plen = l;
3612 return bounce.buffer;
6d16c2f8 3613 }
8ab934f9 3614 if (!todo) {
f3705d53 3615 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3616 + memory_region_section_addr(section, addr);
8ab934f9 3617 }
6d16c2f8
AL
3618
3619 len -= l;
3620 addr += l;
38bee5dc 3621 todo += l;
6d16c2f8 3622 }
8ab934f9
SS
3623 rlen = todo;
3624 ret = qemu_ram_ptr_length(raddr, &rlen);
3625 *plen = rlen;
3626 return ret;
6d16c2f8
AL
3627}
3628
3629/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3630 * Will also mark the memory as dirty if is_write == 1. access_len gives
3631 * the amount of memory that was actually read or written by the caller.
3632 */
c227f099
AL
3633void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3634 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3635{
3636 if (buffer != bounce.buffer) {
3637 if (is_write) {
e890261f 3638 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3639 while (access_len) {
3640 unsigned l;
3641 l = TARGET_PAGE_SIZE;
3642 if (l > access_len)
3643 l = access_len;
3644 if (!cpu_physical_memory_is_dirty(addr1)) {
3645 /* invalidate code */
3646 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3647 /* set dirty bit */
f7c11b53
YT
3648 cpu_physical_memory_set_dirty_flags(
3649 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3650 }
3651 addr1 += l;
3652 access_len -= l;
3653 }
3654 }
868bb33f 3655 if (xen_enabled()) {
e41d7c69 3656 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3657 }
6d16c2f8
AL
3658 return;
3659 }
3660 if (is_write) {
3661 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3662 }
f8a83245 3663 qemu_vfree(bounce.buffer);
6d16c2f8 3664 bounce.buffer = NULL;
ba223c29 3665 cpu_notify_map_clients();
6d16c2f8 3666}
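/* A hedged sketch of the intended zero-copy pattern (the example_* names
   are illustrative, not QEMU APIs): map, touch only the subset actually
   mapped, then unmap with the length really transferred so dirty tracking
   stays correct.  When the single bounce buffer is busy the map returns
   NULL, and cpu_register_map_client() (defined above) delivers a callback
   once retrying is likely to succeed.  A real caller would also loop when
   *plen comes back smaller than the requested length. */
static void example_dma_retry(void *opaque);

static void example_dma_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* bounce buffer in use: ask to be notified, then bail out */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    memset(host, 0, plen);               /* touch only the mapped subset */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}

static void example_dma_retry(void *opaque)
{
    /* resources were released; re-issue the transfer (state keeping
       omitted for brevity) */
}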
d0ecd2aa 3667
8df1cd07 3668/* warning: addr must be aligned */
1e78bcc1
AG
3669static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3670 enum device_endian endian)
8df1cd07 3671{
8df1cd07
FB
3672 uint8_t *ptr;
3673 uint32_t val;
f3705d53 3674 MemoryRegionSection *section;
8df1cd07 3675
06ef3525 3676 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3677
cc5bea60
BS
3678 if (!(memory_region_is_ram(section->mr) ||
3679 memory_region_is_romd(section->mr))) {
8df1cd07 3680 /* I/O case */
cc5bea60 3681 addr = memory_region_section_addr(section, addr);
37ec01d4 3682 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3683#if defined(TARGET_WORDS_BIGENDIAN)
3684 if (endian == DEVICE_LITTLE_ENDIAN) {
3685 val = bswap32(val);
3686 }
3687#else
3688 if (endian == DEVICE_BIG_ENDIAN) {
3689 val = bswap32(val);
3690 }
3691#endif
8df1cd07
FB
3692 } else {
3693 /* RAM case */
f3705d53 3694 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3695 & TARGET_PAGE_MASK)
cc5bea60 3696 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3697 switch (endian) {
3698 case DEVICE_LITTLE_ENDIAN:
3699 val = ldl_le_p(ptr);
3700 break;
3701 case DEVICE_BIG_ENDIAN:
3702 val = ldl_be_p(ptr);
3703 break;
3704 default:
3705 val = ldl_p(ptr);
3706 break;
3707 }
8df1cd07
FB
3708 }
3709 return val;
3710}
3711
1e78bcc1
AG
3712uint32_t ldl_phys(target_phys_addr_t addr)
3713{
3714 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3715}
3716
3717uint32_t ldl_le_phys(target_phys_addr_t addr)
3718{
3719 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3720}
3721
3722uint32_t ldl_be_phys(target_phys_addr_t addr)
3723{
3724 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3725}
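/* A small sketch: the _le/_be variants pin the byte order of the access
   regardless of host and target endianness, which is what device models
   want for registers with an architected layout.  DEV_MMIO_BASE is an
   assumed address, not a real device. */
#define DEV_MMIO_BASE 0xfe000000
static uint32_t example_read_id_reg(void)
{
    return ldl_le_phys(DEV_MMIO_BASE); /* little-endian by the device spec */
}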
3726
84b7b8e7 3727/* warning: addr must be aligned */
1e78bcc1
AG
3728static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3729 enum device_endian endian)
84b7b8e7 3730{
84b7b8e7
FB
3731 uint8_t *ptr;
3732 uint64_t val;
f3705d53 3733 MemoryRegionSection *section;
84b7b8e7 3734
06ef3525 3735 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3736
cc5bea60
BS
3737 if (!(memory_region_is_ram(section->mr) ||
3738 memory_region_is_romd(section->mr))) {
84b7b8e7 3739 /* I/O case */
cc5bea60 3740 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3741
3742 /* XXX This is broken when device endian != cpu endian.
3743 Fix and add "endian" variable check */
84b7b8e7 3744#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3745 val = io_mem_read(section->mr, addr, 4) << 32;
3746 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3747#else
37ec01d4
AK
3748 val = io_mem_read(section->mr, addr, 4);
3749 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3750#endif
3751 } else {
3752 /* RAM case */
f3705d53 3753 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3754 & TARGET_PAGE_MASK)
cc5bea60 3755 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3756 switch (endian) {
3757 case DEVICE_LITTLE_ENDIAN:
3758 val = ldq_le_p(ptr);
3759 break;
3760 case DEVICE_BIG_ENDIAN:
3761 val = ldq_be_p(ptr);
3762 break;
3763 default:
3764 val = ldq_p(ptr);
3765 break;
3766 }
84b7b8e7
FB
3767 }
3768 return val;
3769}
3770
1e78bcc1
AG
3771uint64_t ldq_phys(target_phys_addr_t addr)
3772{
3773 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3774}
3775
3776uint64_t ldq_le_phys(target_phys_addr_t addr)
3777{
3778 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3779}
3780
3781uint64_t ldq_be_phys(target_phys_addr_t addr)
3782{
3783 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3784}
3785
aab33094 3786/* XXX: optimize */
c227f099 3787uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3788{
3789 uint8_t val;
3790 cpu_physical_memory_read(addr, &val, 1);
3791 return val;
3792}
3793
733f0b02 3794/* warning: addr must be aligned */
1e78bcc1
AG
3795static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3796 enum device_endian endian)
aab33094 3797{
733f0b02
MT
3798 uint8_t *ptr;
3799 uint64_t val;
f3705d53 3800 MemoryRegionSection *section;
733f0b02 3801
06ef3525 3802 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3803
cc5bea60
BS
3804 if (!(memory_region_is_ram(section->mr) ||
3805 memory_region_is_romd(section->mr))) {
733f0b02 3806 /* I/O case */
cc5bea60 3807 addr = memory_region_section_addr(section, addr);
37ec01d4 3808 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3809#if defined(TARGET_WORDS_BIGENDIAN)
3810 if (endian == DEVICE_LITTLE_ENDIAN) {
3811 val = bswap16(val);
3812 }
3813#else
3814 if (endian == DEVICE_BIG_ENDIAN) {
3815 val = bswap16(val);
3816 }
3817#endif
733f0b02
MT
3818 } else {
3819 /* RAM case */
f3705d53 3820 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3821 & TARGET_PAGE_MASK)
cc5bea60 3822 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3823 switch (endian) {
3824 case DEVICE_LITTLE_ENDIAN:
3825 val = lduw_le_p(ptr);
3826 break;
3827 case DEVICE_BIG_ENDIAN:
3828 val = lduw_be_p(ptr);
3829 break;
3830 default:
3831 val = lduw_p(ptr);
3832 break;
3833 }
733f0b02
MT
3834 }
3835 return val;
aab33094
FB
3836}
3837
1e78bcc1
AG
3838uint32_t lduw_phys(target_phys_addr_t addr)
3839{
3840 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3841}
3842
3843uint32_t lduw_le_phys(target_phys_addr_t addr)
3844{
3845 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3846}
3847
3848uint32_t lduw_be_phys(target_phys_addr_t addr)
3849{
3850 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3851}
3852
8df1cd07
FB
3853/* warning: addr must be aligned. The RAM page is not marked as dirty
3854 and the code inside is not invalidated. It is useful if the dirty
3855 bits are used to track modified PTEs */
c227f099 3856void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3857{
8df1cd07 3858 uint8_t *ptr;
f3705d53 3859 MemoryRegionSection *section;
8df1cd07 3860
06ef3525 3861 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3862
f3705d53 3863 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3864 addr = memory_region_section_addr(section, addr);
f3705d53 3865 if (memory_region_is_ram(section->mr)) {
37ec01d4 3866 section = &phys_sections[phys_section_rom];
06ef3525 3867 }
37ec01d4 3868 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3869 } else {
f3705d53 3870 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3871 & TARGET_PAGE_MASK)
cc5bea60 3872 + memory_region_section_addr(section, addr);
5579c7f3 3873 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3874 stl_p(ptr, val);
74576198
AL
3875
3876 if (unlikely(in_migration)) {
3877 if (!cpu_physical_memory_is_dirty(addr1)) {
3878 /* invalidate code */
3879 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3880 /* set dirty bit */
f7c11b53
YT
3881 cpu_physical_memory_set_dirty_flags(
3882 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3883 }
3884 }
8df1cd07
FB
3885 }
3886}
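/* Illustrative use, modelled on target page-table walkers (PTE_ACCESSED
   and the flat 32-bit PTE layout are assumptions): set the accessed bit
   in a guest PTE without marking the RAM page dirty, so that dirty-bit
   based tracking of page-table pages is not defeated by the walker
   itself. */
#define PTE_ACCESSED 0x20
static void example_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}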
3887
c227f099 3888void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3889{
bc98a7ef 3890 uint8_t *ptr;
f3705d53 3891 MemoryRegionSection *section;
bc98a7ef 3892
06ef3525 3893 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3894
f3705d53 3895 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3896 addr = memory_region_section_addr(section, addr);
f3705d53 3897 if (memory_region_is_ram(section->mr)) {
37ec01d4 3898 section = &phys_sections[phys_section_rom];
06ef3525 3899 }
bc98a7ef 3900#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3901 io_mem_write(section->mr, addr, val >> 32, 4);
3902 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 3903#else
37ec01d4
AK
3904 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3905 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
3906#endif
3907 } else {
f3705d53 3908 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3909 & TARGET_PAGE_MASK)
cc5bea60 3910 + memory_region_section_addr(section, addr));
bc98a7ef
JM
3911 stq_p(ptr, val);
3912 }
3913}
3914
8df1cd07 3915/* warning: addr must be aligned */
1e78bcc1
AG
3916static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3917 enum device_endian endian)
8df1cd07 3918{
8df1cd07 3919 uint8_t *ptr;
f3705d53 3920 MemoryRegionSection *section;
8df1cd07 3921
06ef3525 3922 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3923
f3705d53 3924 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3925 addr = memory_region_section_addr(section, addr);
f3705d53 3926 if (memory_region_is_ram(section->mr)) {
37ec01d4 3927 section = &phys_sections[phys_section_rom];
06ef3525 3928 }
1e78bcc1
AG
3929#if defined(TARGET_WORDS_BIGENDIAN)
3930 if (endian == DEVICE_LITTLE_ENDIAN) {
3931 val = bswap32(val);
3932 }
3933#else
3934 if (endian == DEVICE_BIG_ENDIAN) {
3935 val = bswap32(val);
3936 }
3937#endif
37ec01d4 3938 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
3939 } else {
3940 unsigned long addr1;
f3705d53 3941 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 3942 + memory_region_section_addr(section, addr);
8df1cd07 3943 /* RAM case */
5579c7f3 3944 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3945 switch (endian) {
3946 case DEVICE_LITTLE_ENDIAN:
3947 stl_le_p(ptr, val);
3948 break;
3949 case DEVICE_BIG_ENDIAN:
3950 stl_be_p(ptr, val);
3951 break;
3952 default:
3953 stl_p(ptr, val);
3954 break;
3955 }
3a7d929e
FB
3956 if (!cpu_physical_memory_is_dirty(addr1)) {
3957 /* invalidate code */
3958 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3959 /* set dirty bit */
f7c11b53
YT
3960 cpu_physical_memory_set_dirty_flags(addr1,
3961 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3962 }
8df1cd07
FB
3963 }
3964}
3965
1e78bcc1
AG
3966void stl_phys(target_phys_addr_t addr, uint32_t val)
3967{
3968 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3969}
3970
3971void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3972{
3973 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3974}
3975
3976void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3977{
3978 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3979}
3980
aab33094 3981/* XXX: optimize */
c227f099 3982void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
3983{
3984 uint8_t v = val;
3985 cpu_physical_memory_write(addr, &v, 1);
3986}
3987
733f0b02 3988/* warning: addr must be aligned */
1e78bcc1
AG
3989static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
3990 enum device_endian endian)
aab33094 3991{
733f0b02 3992 uint8_t *ptr;
f3705d53 3993 MemoryRegionSection *section;
733f0b02 3994
06ef3525 3995 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3996
f3705d53 3997 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3998 addr = memory_region_section_addr(section, addr);
f3705d53 3999 if (memory_region_is_ram(section->mr)) {
37ec01d4 4000 section = &phys_sections[phys_section_rom];
06ef3525 4001 }
1e78bcc1
AG
4002#if defined(TARGET_WORDS_BIGENDIAN)
4003 if (endian == DEVICE_LITTLE_ENDIAN) {
4004 val = bswap16(val);
4005 }
4006#else
4007 if (endian == DEVICE_BIG_ENDIAN) {
4008 val = bswap16(val);
4009 }
4010#endif
37ec01d4 4011 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4012 } else {
4013 unsigned long addr1;
f3705d53 4014 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4015 + memory_region_section_addr(section, addr);
733f0b02
MT
4016 /* RAM case */
4017 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4018 switch (endian) {
4019 case DEVICE_LITTLE_ENDIAN:
4020 stw_le_p(ptr, val);
4021 break;
4022 case DEVICE_BIG_ENDIAN:
4023 stw_be_p(ptr, val);
4024 break;
4025 default:
4026 stw_p(ptr, val);
4027 break;
4028 }
733f0b02
MT
4029 if (!cpu_physical_memory_is_dirty(addr1)) {
4030 /* invalidate code */
4031 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4032 /* set dirty bit */
4033 cpu_physical_memory_set_dirty_flags(addr1,
4034 (0xff & ~CODE_DIRTY_FLAG));
4035 }
4036 }
aab33094
FB
4037}
4038
1e78bcc1
AG
4039void stw_phys(target_phys_addr_t addr, uint32_t val)
4040{
4041 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4042}
4043
4044void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4045{
4046 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4047}
4048
4049void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4050{
4051 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4052}
4053
aab33094 4054/* XXX: optimize */
c227f099 4055void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4056{
4057 val = tswap64(val);
71d2b725 4058 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4059}
4060
1e78bcc1
AG
4061void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4062{
4063 val = cpu_to_le64(val);
4064 cpu_physical_memory_write(addr, &val, 8);
4065}
4066
4067void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4068{
4069 val = cpu_to_be64(val);
4070 cpu_physical_memory_write(addr, &val, 8);
4071}
4072
5e2972fd 4073/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4074int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4075 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4076{
4077 int l;
c227f099 4078 target_phys_addr_t phys_addr;
9b3c35e0 4079 target_ulong page;
13eb76e0
FB
4080
4081 while (len > 0) {
4082 page = addr & TARGET_PAGE_MASK;
4083 phys_addr = cpu_get_phys_page_debug(env, page);
4084 /* if no physical page mapped, return an error */
4085 if (phys_addr == -1)
4086 return -1;
4087 l = (page + TARGET_PAGE_SIZE) - addr;
4088 if (l > len)
4089 l = len;
5e2972fd 4090 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4091 if (is_write)
4092 cpu_physical_memory_write_rom(phys_addr, buf, l);
4093 else
5e2972fd 4094 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4095 len -= l;
4096 buf += l;
4097 addr += l;
4098 }
4099 return 0;
4100}
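/* A gdb-stub style sketch (example_peek is an illustrative name): the
   access goes through the CPU's current page tables via
   cpu_get_phys_page_debug(), so an unmapped virtual page yields -1
   instead of faulting the guest. */
static int example_peek(CPUArchState *env, target_ulong vaddr,
                        uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}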
a68fe89c 4101#endif
13eb76e0 4102
2e70f6ef
PB
4103/* in deterministic execution mode, instructions doing device I/Os
4104 must be at the end of the TB */
20503968 4105void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4106{
4107 TranslationBlock *tb;
4108 uint32_t n, cflags;
4109 target_ulong pc, cs_base;
4110 uint64_t flags;
4111
20503968 4112 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4113 if (!tb) {
4114 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4115 (void *)retaddr);
2e70f6ef
PB
4116 }
4117 n = env->icount_decr.u16.low + tb->icount;
20503968 4118 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4119 /* Calculate how many instructions had been executed before the fault
bf20dc07 4120 occurred. */
2e70f6ef
PB
4121 n = n - env->icount_decr.u16.low;
4122 /* Generate a new TB ending on the I/O insn. */
4123 n++;
4124 /* On MIPS and SH, delay slot instructions can only be restarted if
4125 they were already the first instruction in the TB. If this is not
bf20dc07 4126 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4127 branch. */
4128#if defined(TARGET_MIPS)
4129 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4130 env->active_tc.PC -= 4;
4131 env->icount_decr.u16.low++;
4132 env->hflags &= ~MIPS_HFLAG_BMASK;
4133 }
4134#elif defined(TARGET_SH4)
4135 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4136 && n > 1) {
4137 env->pc -= 2;
4138 env->icount_decr.u16.low++;
4139 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4140 }
4141#endif
4142 /* This should never happen. */
4143 if (n > CF_COUNT_MASK)
4144 cpu_abort(env, "TB too big during recompile");
4145
4146 cflags = n | CF_LAST_IO;
4147 pc = tb->pc;
4148 cs_base = tb->cs_base;
4149 flags = tb->flags;
4150 tb_phys_invalidate(tb, -1);
4151 /* FIXME: In theory this could raise an exception. In practice
4152 we have already translated the block once so it's probably ok. */
4153 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4154 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4155 the first in the TB) then we end up generating a whole new TB and
4156 repeating the fault, which is horribly inefficient.
4157 Better would be to execute just this insn uncached, or generate a
4158 second new TB. */
4159 cpu_resume_from_signal(env, NULL);
4160}
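/* A hedged sketch of the expected caller, modelled on the softmmu
   load/store helpers (the guard name can_do_io() is assumed to be the
   one from exec-all.h): an MMIO access helper running under -icount
   that is not at the end of its TB requests a retranslation ending on
   the I/O insn.  The call longjmps back into the main loop and does
   not return. */
static void example_icount_io_guard(CPUArchState *env, uintptr_t retaddr)
{
    if (!can_do_io(env)) {
        cpu_io_recompile(env, retaddr); /* does not return */
    }
}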
4161
b3755a91
PB
4162#if !defined(CONFIG_USER_ONLY)
4163
055403b2 4164void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4165{
4166 int i, target_code_size, max_target_code_size;
4167 int direct_jmp_count, direct_jmp2_count, cross_page;
4168 TranslationBlock *tb;
3b46e624 4169
e3db7226
FB
4170 target_code_size = 0;
4171 max_target_code_size = 0;
4172 cross_page = 0;
4173 direct_jmp_count = 0;
4174 direct_jmp2_count = 0;
4175 for(i = 0; i < nb_tbs; i++) {
4176 tb = &tbs[i];
4177 target_code_size += tb->size;
4178 if (tb->size > max_target_code_size)
4179 max_target_code_size = tb->size;
4180 if (tb->page_addr[1] != -1)
4181 cross_page++;
4182 if (tb->tb_next_offset[0] != 0xffff) {
4183 direct_jmp_count++;
4184 if (tb->tb_next_offset[1] != 0xffff) {
4185 direct_jmp2_count++;
4186 }
4187 }
4188 }
4189 /* XXX: avoid using doubles? */
57fec1fe 4190 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4191 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4192 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4193 cpu_fprintf(f, "TB count %d/%d\n",
4194 nb_tbs, code_gen_max_blocks);
5fafdf24 4195 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4196 nb_tbs ? target_code_size / nb_tbs : 0,
4197 max_target_code_size);
055403b2 4198 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4199 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4200 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4201 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4202 cross_page,
e3db7226
FB
4203 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4204 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4205 direct_jmp_count,
e3db7226
FB
4206 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4207 direct_jmp2_count,
4208 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4209 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4210 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4211 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4212 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4213 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4214}
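/* This is essentially what the monitor's "info jit" command prints; a
   standalone sketch can dump the same statistics to stderr, since
   fprintf() matches the fprintf_function typedef: */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}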
4215
82afa586
BH
4216/*
4217 * A helper function for the _utterly broken_ virtio device model to find out if
4218 * it's running on a big endian machine. Don't do this at home kids!
4219 */
4220bool virtio_is_big_endian(void);
4221bool virtio_is_big_endian(void)
4222{
4223#if defined(TARGET_WORDS_BIGENDIAN)
4224 return true;
4225#else
4226 return false;
4227#endif
4228}
4229
61382a50 4230#endif
76f35538
WC
4231
4232#ifndef CONFIG_USER_ONLY
4233bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4234{
4235 MemoryRegionSection *section;
4236
4237 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4238
4239 return !(memory_region_is_ram(section->mr) ||
4240 memory_region_is_romd(section->mr));
4241}
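/* A hedged sketch: a hypothetical physical-memory scanner uses this
   predicate to skip pages where reading would trigger MMIO side
   effects. */
static void example_scan_page(target_phys_addr_t addr, uint8_t *buf)
{
    if (!cpu_physical_memory_is_io(addr)) {
        cpu_physical_memory_read(addr, buf, TARGET_PAGE_SIZE);
    }
}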
4242#endif