/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
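/* Worked example (added commentary; assumes a 64-bit host with 4 KiB
   target pages, i.e. L1_MAP_ADDR_SPACE_BITS = 64 and
   TARGET_PAGE_BITS = 12): the map must then cover 64 - 12 = 52 bits of
   page-number space.  52 % 10 gives V_L1_BITS_REM = 2, which is below
   the threshold of 4, so the first level is widened to
   V_L1_BITS = 2 + 10 = 12, i.e. V_L1_SIZE = 4096 entries.
   V_L1_SHIFT = 64 - 12 - 12 = 40, leaving 40 / 10 = 4 ten-bit levels
   below the first: 12 + 4 * 10 = 52 bits in total. */
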
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
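/* Illustration (added commentary, not in the original source): each
   PhysPageEntry packs into 16 bits - one tag bit plus a 15-bit index -
   so interior nodes and leaves have the same size and a node array
   stays compact.  PHYS_MAP_NODE_NIL is 0xffff >> 1 = 0x7fff, the
   largest value the 15-bit 'ptr' field can hold, reserved to mean "no
   node allocated yet"; phys_map_node_alloc() below asserts that it
   never hands out that index. */
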
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
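/* Usage sketch (added commentary, assuming the 64-bit layout worked
   out above): the walk consumes V_L1_BITS of the page index at the top
   level and L2_BITS at each level below, e.g.

       PageDesc *pd = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

   allocates at most one table per level on first touch.  With
   alloc == 0 (the page_find() wrapper below) the walk instead bails
   out with NULL at the first missing table. */
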
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
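/* Worked example (added commentary): registering a 2 MiB-aligned
   2 MiB region with 4 KiB target pages means nb = 512 pages.  At
   level 1 the step is 1 << (1 * L2_BITS) = 1024 pages, so the range
   does not fill a whole level-1 entry and phys_page_set_level()
   recurses to level 0, where each of the 512 pages passes the
   "(*index & (step - 1)) == 0 && *nb >= step" test and becomes a leaf
   pointing at 'leaf'.  A range that does cover a whole aligned
   1024-page slot is instead recorded as a single leaf at level 1,
   which is how large mappings stay cheap. */
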
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
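/* Sizing note (added commentary): code_gen_buffer_max_size deliberately
   stops TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes short of the real end of
   the buffer.  tb_alloc() only checks this threshold *before* a block
   is translated, so the slack must be large enough for one worst-case
   translated block to finish without overrunning the buffer. */
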
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
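/* Note on the '& 3' trick used above and below (explanatory comment,
   added here; not in the original source): pointers stored in
   page_next[], jmp_first and jmp_next[] carry a tag in their two low
   bits, which are free because TranslationBlock pointers are at least
   4-byte aligned.  For the per-page lists the tag (0 or 1) records
   which of the TB's up to two physical pages the link belongs to; for
   the jump lists, tags 0 and 1 select the jump slot and tag 2 marks
   the list head stored in jmp_first. */
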
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
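/* Worked example (added for illustration): set_bits(tab, 5, 7) marks
   bits 5..11.  start = 5 and end = 12 fall in different bytes, so the
   else branch runs: tab[0] |= 0xff << 5 = 0xe0 (bits 5..7); no full
   0xff byte is written because end1 = 8 equals the rounded-up start;
   and the tail sets tab[1] |= ~(0xff << 4) = 0x0f (bits 8..11). */
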
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}
1084
8e0fdce3
JK
1085/*
1086 * Invalidate all TBs which intersect with the target physical address range
1087 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1088 * 'is_cpu_write_access' should be true if called from a real cpu write
1089 * access: the virtual CPU will exit the current TB if code is modified inside
1090 * this TB.
1091 */
41c1b1c9 1092void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1093 int is_cpu_write_access)
1094{
6b917547 1095 TranslationBlock *tb, *tb_next, *saved_tb;
9349b4f9 1096 CPUArchState *env = cpu_single_env;
41c1b1c9 1097 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1098 PageDesc *p;
1099 int n;
1100#ifdef TARGET_HAS_PRECISE_SMC
1101 int current_tb_not_found = is_cpu_write_access;
1102 TranslationBlock *current_tb = NULL;
1103 int current_tb_modified = 0;
1104 target_ulong current_pc = 0;
1105 target_ulong current_cs_base = 0;
1106 int current_flags = 0;
1107#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1108
1109 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1110 if (!p)
9fa3e853 1111 return;
5fafdf24 1112 if (!p->code_bitmap &&
d720b93d
FB
1113 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1114 is_cpu_write_access) {
9fa3e853
FB
1115 /* build code bitmap */
1116 build_page_bitmap(p);
1117 }
1118
1119 /* we remove all the TBs in the range [start, end[ */
1120 /* XXX: see if in some cases it could be faster to invalidate all the code */
1121 tb = p->first_tb;
1122 while (tb != NULL) {
8efe0ca8
SW
1123 n = (uintptr_t)tb & 3;
1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
9fa3e853
FB
1125 tb_next = tb->page_next[n];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1127 if (n == 0) {
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1131 tb_end = tb_start + tb->size;
1132 } else {
1133 tb_start = tb->page_addr[1];
1134 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1135 }
1136 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1137#ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found) {
1139 current_tb_not_found = 0;
1140 current_tb = NULL;
2e70f6ef 1141 if (env->mem_io_pc) {
d720b93d 1142 /* now we have a real cpu fault */
2e70f6ef 1143 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1144 }
1145 }
1146 if (current_tb == tb &&
2e70f6ef 1147 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
3b46e624 1153
d720b93d 1154 current_tb_modified = 1;
618ba8e6 1155 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1156 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1157 &current_flags);
d720b93d
FB
1158 }
1159#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1160 /* we need to do that to handle the case where a signal
1161 occurs while doing tb_phys_invalidate() */
1162 saved_tb = NULL;
1163 if (env) {
1164 saved_tb = env->current_tb;
1165 env->current_tb = NULL;
1166 }
9fa3e853 1167 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1168 if (env) {
1169 env->current_tb = saved_tb;
1170 if (env->interrupt_request && env->current_tb)
1171 cpu_interrupt(env, env->interrupt_request);
1172 }
9fa3e853
FB
1173 }
1174 tb = tb_next;
1175 }
1176#if !defined(CONFIG_USER_ONLY)
1177 /* if no code remaining, no need to continue to use slow writes */
1178 if (!p->first_tb) {
1179 invalidate_page_bitmap(p);
d720b93d 1180 if (is_cpu_write_access) {
2e70f6ef 1181 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1182 }
1183 }
1184#endif
1185#ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb_modified) {
1187 /* we generate a block containing just the instruction
1188 modifying the memory. It will ensure that it cannot modify
1189 itself */
ea1c1802 1190 env->current_tb = NULL;
2e70f6ef 1191 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1192 cpu_resume_from_signal(env, NULL);
9fa3e853 1193 }
fd6ce8f6 1194#endif
9fa3e853 1195}
fd6ce8f6 1196
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
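/* Example of the two-page case (added commentary): a TB whose guest
   code starts a few bytes before a page boundary spills into the next
   page, so tb_gen_code() passes a real phys_page2 and the TB is linked
   into *both* pages' first_tb lists (tag 0 on the first page, tag 1 on
   the second).  Invalidating either page then finds and kills the TB,
   which is what makes writes to the second page safe as well. */
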
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
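/* Note (added commentary): tbs[] is filled in the same order as code
   is emitted, so tc_ptr values increase monotonically across the
   array, which is what makes the binary search valid.  When no exact
   match is found the loop exits with m_max indexing the last TB whose
   tc_ptr lies below the requested address, i.e. the block that
   *contains* that host address - the usual case when unwinding from a
   fault in the middle of a translated block. */
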
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
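/* Example of the sanity check above (added for illustration): len must
   be a power of two and addr aligned to it, because watchpoints are
   matched with a mask.  With len = 4, len_mask = ~3: addr = 0x1000 is
   accepted and covers 0x1000..0x1003, while addr = 0x1002 fails the
   "(addr & ~len_mask)" test and returns -EINVAL. */
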
/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
ec6959d0
JK
1715CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1716
97ffbd8d
JK
1717#else /* CONFIG_USER_ONLY */
1718
9349b4f9 1719void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1720{
1721 env->interrupt_request |= mask;
1722 cpu_unlink_tb(env);
1723}
1724#endif /* CONFIG_USER_ONLY */
1725
9349b4f9 1726void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1727{
1728 env->interrupt_request &= ~mask;
1729}
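
/* Illustrative sketch: how an interrupt-controller model might drive
 * the pair above. CPU_INTERRUPT_HARD is the generic IRQ-line flag; the
 * helper name is hypothetical. */
static void example_set_irq_level(CPUArchState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);   /* latch and kick the CPU */
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
    }
}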
1730
9349b4f9 1731void cpu_exit(CPUArchState *env)
3098dba0
AJ
1732{
1733 env->exit_request = 1;
1734 cpu_unlink_tb(env);
1735}
1736
9349b4f9 1737void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1738{
1739 va_list ap;
493ae1f0 1740 va_list ap2;
7501267e
FB
1741
1742 va_start(ap, fmt);
493ae1f0 1743 va_copy(ap2, ap);
7501267e
FB
1744 fprintf(stderr, "qemu: fatal: ");
1745 vfprintf(stderr, fmt, ap);
1746 fprintf(stderr, "\n");
1747#ifdef TARGET_I386
7fe48483
FB
1748 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1749#else
1750 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1751#endif
93fcfe39
AL
1752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt, ap2);
1755 qemu_log("\n");
f9373291 1756#ifdef TARGET_I386
93fcfe39 1757 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1758#else
93fcfe39 1759 log_cpu_state(env, 0);
f9373291 1760#endif
31b1a7b4 1761 qemu_log_flush();
93fcfe39 1762 qemu_log_close();
924edcae 1763 }
493ae1f0 1764 va_end(ap2);
f9373291 1765 va_end(ap);
fd052bf6
RV
1766#if defined(CONFIG_USER_ONLY)
1767 {
1768 struct sigaction act;
1769 sigfillset(&act.sa_mask);
1770 act.sa_handler = SIG_DFL;
1771 sigaction(SIGABRT, &act, NULL);
1772 }
1773#endif
7501267e
FB
1774 abort();
1775}
1776
9349b4f9 1777CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1778{
9349b4f9
AF
1779 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1780 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1781 int cpu_index = new_env->cpu_index;
5a38f081
AL
1782#if defined(TARGET_HAS_ICE)
1783 CPUBreakpoint *bp;
1784 CPUWatchpoint *wp;
1785#endif
1786
9349b4f9 1787 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1788
1789 /* Preserve chaining and index. */
c5be9f08
TS
1790 new_env->next_cpu = next_cpu;
1791 new_env->cpu_index = cpu_index;
5a38f081
AL
1792
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1796 QTAILQ_INIT(&env->breakpoints);
1797 QTAILQ_INIT(&env->watchpoints);
5a38f081 1798#if defined(TARGET_HAS_ICE)
72cf2d4f 1799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1800 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1801 }
72cf2d4f 1802 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1803 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1804 wp->flags, NULL);
1805 }
1806#endif
1807
c5be9f08
TS
1808 return new_env;
1809}
1810
0124311e 1811#if !defined(CONFIG_USER_ONLY)
0cac1b66 1812void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1813{
1814 unsigned int i;
1815
1816    /* Discard jump cache entries for any tb which might
1817 overlap the flushed page. */
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819    memset(&env->tb_jmp_cache[i], 0,
9742bf26 1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1821
1822 i = tb_jmp_cache_hash_page(addr);
1823    memset(&env->tb_jmp_cache[i], 0,
9742bf26 1824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1825}
1826
d24981d3
JQ
1827static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1828 uintptr_t length)
1829{
1830 uintptr_t start1;
1831
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1835 /* Check that we don't span multiple blocks - this breaks the
1836 address comparisons below. */
1837 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1838 != (end - 1) - start) {
1839 abort();
1840 }
1841 cpu_tlb_reset_dirty_all(start1, length);
1842
1843}
1844
5579c7f3 1845/* Note: start and end must be within the same ram block. */
c227f099 1846void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1847 int dirty_flags)
1ccde1cb 1848{
d24981d3 1849 uintptr_t length;
1ccde1cb
FB
1850
1851 start &= TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1853
1854 length = end - start;
1855 if (length == 0)
1856 return;
f7c11b53 1857 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1858
d24981d3
JQ
1859 if (tcg_enabled()) {
1860 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 1861 }
1ccde1cb
FB
1862}
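
/* Illustrative sketch: a display device consuming the dirty log. The
 * framebuffer bounds and helper name are made up; VGA_DIRTY_FLAG is
 * assumed to be the display bit in this tree's dirty-flag set. */
static void example_scan_fb_dirty(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty_flags(addr) & VGA_DIRTY_FLAG) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* clear the display bits so the next scan only sees new writes */
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}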
1863
74576198
AL
1864int cpu_physical_memory_set_dirty_tracking(int enable)
1865{
f6f3fbca 1866 int ret = 0;
74576198 1867 in_migration = enable;
f6f3fbca 1868 return ret;
74576198
AL
1869}
1870
e5548617
BS
1871target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1872 MemoryRegionSection *section,
1873 target_ulong vaddr,
1874 target_phys_addr_t paddr,
1875 int prot,
1876 target_ulong *address)
1877{
1878 target_phys_addr_t iotlb;
1879 CPUWatchpoint *wp;
1880
cc5bea60 1881 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1882 /* Normal RAM. */
1883 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1884 + memory_region_section_addr(section, paddr);
e5548617
BS
1885 if (!section->readonly) {
1886 iotlb |= phys_section_notdirty;
1887 } else {
1888 iotlb |= phys_section_rom;
1889 }
1890 } else {
1891 /* IO handlers are currently passed a physical address.
1892 It would be nice to pass an offset from the base address
1893 of that region. This would avoid having to special case RAM,
1894 and avoid full address decoding in every device.
1895 We can't use the high bits of pd for this because
1896 IO_MEM_ROMD uses these as a ram address. */
1897 iotlb = section - phys_sections;
cc5bea60 1898 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
1899 }
1900
1901 /* Make accesses to pages with watchpoints go via the
1902 watchpoint trap routines. */
1903 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1904 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1905 /* Avoid trapping reads of pages with a write breakpoint. */
1906 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1907 iotlb = phys_section_watch + paddr;
1908 *address |= TLB_MMIO;
1909 break;
1910 }
1911 }
1912 }
1913
1914 return iotlb;
1915}
1916
0124311e 1917#else
edf8e2af
MW
1918/*
1919 * Walks guest process memory "regions" one by one
1920 * and calls callback function 'fn' for each region.
1921 */
5cd2c5b6
RH
1922
1923struct walk_memory_regions_data
1924{
1925 walk_memory_regions_fn fn;
1926 void *priv;
8efe0ca8 1927 uintptr_t start;
5cd2c5b6
RH
1928 int prot;
1929};
1930
1931static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 1932 abi_ulong end, int new_prot)
5cd2c5b6
RH
1933{
1934 if (data->start != -1ul) {
1935 int rc = data->fn(data->priv, data->start, end, data->prot);
1936 if (rc != 0) {
1937 return rc;
1938 }
1939 }
1940
1941 data->start = (new_prot ? end : -1ul);
1942 data->prot = new_prot;
1943
1944 return 0;
1945}
1946
1947static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 1948 abi_ulong base, int level, void **lp)
5cd2c5b6 1949{
b480d9b7 1950 abi_ulong pa;
5cd2c5b6
RH
1951 int i, rc;
1952
1953 if (*lp == NULL) {
1954 return walk_memory_regions_end(data, base, 0);
1955 }
1956
1957 if (level == 0) {
1958 PageDesc *pd = *lp;
7296abac 1959 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1960 int prot = pd[i].flags;
1961
1962 pa = base | (i << TARGET_PAGE_BITS);
1963 if (prot != data->prot) {
1964 rc = walk_memory_regions_end(data, pa, prot);
1965 if (rc != 0) {
1966 return rc;
9fa3e853 1967 }
9fa3e853 1968 }
5cd2c5b6
RH
1969 }
1970 } else {
1971 void **pp = *lp;
7296abac 1972 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
1973 pa = base | ((abi_ulong)i <<
1974 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
1975 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1976 if (rc != 0) {
1977 return rc;
1978 }
1979 }
1980 }
1981
1982 return 0;
1983}
1984
1985int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1986{
1987 struct walk_memory_regions_data data;
8efe0ca8 1988 uintptr_t i;
5cd2c5b6
RH
1989
1990 data.fn = fn;
1991 data.priv = priv;
1992 data.start = -1ul;
1993 data.prot = 0;
1994
1995 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 1996 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
1997 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1998 if (rc != 0) {
1999 return rc;
9fa3e853 2000 }
33417e70 2001 }
5cd2c5b6
RH
2002
2003 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2004}
2005
b480d9b7
PB
2006static int dump_region(void *priv, abi_ulong start,
2007 abi_ulong end, unsigned long prot)
edf8e2af
MW
2008{
2009 FILE *f = (FILE *)priv;
2010
b480d9b7
PB
2011 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2012 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2013 start, end, end - start,
2014 ((prot & PAGE_READ) ? 'r' : '-'),
2015 ((prot & PAGE_WRITE) ? 'w' : '-'),
2016 ((prot & PAGE_EXEC) ? 'x' : '-'));
2017
2018 return (0);
2019}
2020
2021/* dump memory mappings */
2022void page_dump(FILE *f)
2023{
2024 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2025 "start", "end", "size", "prot");
2026 walk_memory_regions(f, dump_region);
33417e70
FB
2027}
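
/* Illustrative sketch: page_dump() above is one walk_memory_regions()
 * client; another (hypothetical) callback could total executable
 * bytes. A non-zero return would abort the walk early. */
static int example_count_exec(void *priv, abi_ulong start,
                              abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0;
}
/* usage: abi_ulong total = 0;
 *        walk_memory_regions(&total, example_count_exec); */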
2028
53a5960a 2029int page_get_flags(target_ulong address)
33417e70 2030{
9fa3e853
FB
2031 PageDesc *p;
2032
2033 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2034 if (!p)
9fa3e853
FB
2035 return 0;
2036 return p->flags;
2037}
2038
376a7909
RH
2039/* Modify the flags of a page and invalidate the code if necessary.
2040   The flag PAGE_WRITE_ORG is set automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2042void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2043{
376a7909
RH
2044 target_ulong addr, len;
2045
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
b480d9b7
PB
2049#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2051#endif
2052 assert(start < end);
9fa3e853
FB
2053
2054 start = start & TARGET_PAGE_MASK;
2055 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2056
2057 if (flags & PAGE_WRITE) {
9fa3e853 2058 flags |= PAGE_WRITE_ORG;
376a7909
RH
2059 }
2060
2061 for (addr = start, len = end - start;
2062 len != 0;
2063 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2064 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2065
2066 /* If the write protection bit is set, then we invalidate
2067 the code inside. */
5fafdf24 2068 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2069 (flags & PAGE_WRITE) &&
2070 p->first_tb) {
d720b93d 2071 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2072 }
2073 p->flags = flags;
2074 }
33417e70
FB
2075}
2076
3d97b40b
TS
2077int page_check_range(target_ulong start, target_ulong len, int flags)
2078{
2079 PageDesc *p;
2080 target_ulong end;
2081 target_ulong addr;
2082
376a7909
RH
2083 /* This function should never be called with addresses outside the
2084 guest address space. If this assert fires, it probably indicates
2085 a missing call to h2g_valid. */
338e9e6c
BS
2086#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2088#endif
2089
3e0650a9
RH
2090 if (len == 0) {
2091 return 0;
2092 }
376a7909
RH
2093 if (start + len - 1 < start) {
2094 /* We've wrapped around. */
55f280c9 2095 return -1;
376a7909 2096 }
55f280c9 2097
3d97b40b
TS
2098    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2099 start = start & TARGET_PAGE_MASK;
2100
376a7909
RH
2101 for (addr = start, len = end - start;
2102 len != 0;
2103 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2104 p = page_find(addr >> TARGET_PAGE_BITS);
2105        if (!p)
2106            return -1;
2107        if (!(p->flags & PAGE_VALID))
2108            return -1;
2109
dae3270c 2110 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2111 return -1;
dae3270c
FB
2112 if (flags & PAGE_WRITE) {
2113 if (!(p->flags & PAGE_WRITE_ORG))
2114 return -1;
2115 /* unprotect the page if it was put read-only because it
2116 contains translated code */
2117 if (!(p->flags & PAGE_WRITE)) {
2118 if (!page_unprotect(addr, 0, NULL))
2119 return -1;
2120 }
2121 return 0;
2122 }
3d97b40b
TS
2123 }
2124 return 0;
2125}
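
/* Illustrative sketch: a syscall-emulation path validating a guest
 * buffer before touching it (the real linux-user code goes through
 * access_ok()/lock_user(), which build on this). The helper name is
 * hypothetical; it returns non-zero when the whole range is
 * accessible. */
static int example_guest_buffer_ok(target_ulong guest_addr,
                                   target_ulong len, int need_write)
{
    int flags = PAGE_READ | (need_write ? PAGE_WRITE : 0);
    return page_check_range(guest_addr, len, flags) == 0;
}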
2126
9fa3e853 2127/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2128 page. Return TRUE if the fault was successfully handled. */
6375e09e 2129int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2130{
45d679d6
AJ
2131 unsigned int prot;
2132 PageDesc *p;
53a5960a 2133 target_ulong host_start, host_end, addr;
9fa3e853 2134
c8a706fe
PB
2135 /* Technically this isn't safe inside a signal handler. However we
2136 know this only ever happens in a synchronous SEGV handler, so in
2137 practice it seems to be ok. */
2138 mmap_lock();
2139
45d679d6
AJ
2140 p = page_find(address >> TARGET_PAGE_BITS);
2141 if (!p) {
c8a706fe 2142 mmap_unlock();
9fa3e853 2143 return 0;
c8a706fe 2144 }
45d679d6 2145
9fa3e853
FB
2146 /* if the page was really writable, then we change its
2147 protection back to writable */
45d679d6
AJ
2148 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2149 host_start = address & qemu_host_page_mask;
2150 host_end = host_start + qemu_host_page_size;
2151
2152 prot = 0;
2153 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2154 p = page_find(addr >> TARGET_PAGE_BITS);
2155 p->flags |= PAGE_WRITE;
2156 prot |= p->flags;
2157
9fa3e853
FB
2158 /* and since the content will be modified, we must invalidate
2159 the corresponding translated code. */
45d679d6 2160 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2161#ifdef DEBUG_TB_CHECK
45d679d6 2162 tb_invalidate_check(addr);
9fa3e853 2163#endif
9fa3e853 2164 }
45d679d6
AJ
2165 mprotect((void *)g2h(host_start), qemu_host_page_size,
2166 prot & PAGE_BITS);
2167
2168 mmap_unlock();
2169 return 1;
9fa3e853 2170 }
c8a706fe 2171 mmap_unlock();
9fa3e853
FB
2172 return 0;
2173}
9fa3e853
FB
2174#endif /* defined(CONFIG_USER_ONLY) */
2175
e2eef170 2176#if !defined(CONFIG_USER_ONLY)
8da3ff18 2177
c04b2b78
PB
2178#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2179typedef struct subpage_t {
70c68e44 2180 MemoryRegion iomem;
c04b2b78 2181 target_phys_addr_t base;
5312bd8b 2182 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2183} subpage_t;
2184
c227f099 2185static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2186 uint16_t section);
0f0cb164 2187static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2188static void destroy_page_desc(uint16_t section_index)
54688b1e 2189{
5312bd8b
AK
2190 MemoryRegionSection *section = &phys_sections[section_index];
2191 MemoryRegion *mr = section->mr;
54688b1e
AK
2192
2193 if (mr->subpage) {
2194 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2195 memory_region_destroy(&subpage->iomem);
2196 g_free(subpage);
2197 }
2198}
2199
4346ae3e 2200static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2201{
2202 unsigned i;
d6f2ea22 2203 PhysPageEntry *p;
54688b1e 2204
c19e8800 2205 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2206 return;
2207 }
2208
c19e8800 2209 p = phys_map_nodes[lp->ptr];
4346ae3e 2210 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2211 if (!p[i].is_leaf) {
54688b1e 2212 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2213 } else {
c19e8800 2214 destroy_page_desc(p[i].ptr);
54688b1e 2215 }
54688b1e 2216 }
07f07b31 2217 lp->is_leaf = 0;
c19e8800 2218 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2219}
2220
2221static void destroy_all_mappings(void)
2222{
3eef53df 2223 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2224 phys_map_nodes_reset();
54688b1e
AK
2225}
2226
5312bd8b
AK
2227static uint16_t phys_section_add(MemoryRegionSection *section)
2228{
2229 if (phys_sections_nb == phys_sections_nb_alloc) {
2230 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2231 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2232 phys_sections_nb_alloc);
2233 }
2234 phys_sections[phys_sections_nb] = *section;
2235 return phys_sections_nb++;
2236}
2237
2238static void phys_sections_clear(void)
2239{
2240 phys_sections_nb = 0;
2241}
2242
0f0cb164
AK
2243static void register_subpage(MemoryRegionSection *section)
2244{
2245 subpage_t *subpage;
2246 target_phys_addr_t base = section->offset_within_address_space
2247 & TARGET_PAGE_MASK;
f3705d53 2248 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2249 MemoryRegionSection subsection = {
2250 .offset_within_address_space = base,
2251 .size = TARGET_PAGE_SIZE,
2252 };
0f0cb164
AK
2253 target_phys_addr_t start, end;
2254
f3705d53 2255 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2256
f3705d53 2257 if (!(existing->mr->subpage)) {
0f0cb164
AK
2258 subpage = subpage_init(base);
2259 subsection.mr = &subpage->iomem;
2999097b
AK
2260 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2261 phys_section_add(&subsection));
0f0cb164 2262 } else {
f3705d53 2263 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2264 }
2265 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 2266 end = start + section->size - 1;
0f0cb164
AK
2267 subpage_register(subpage, start, end, phys_section_add(section));
2268}
2269
2270
2271static void register_multipage(MemoryRegionSection *section)
33417e70 2272{
dd81124b
AK
2273 target_phys_addr_t start_addr = section->offset_within_address_space;
2274 ram_addr_t size = section->size;
2999097b 2275 target_phys_addr_t addr;
5312bd8b 2276 uint16_t section_index = phys_section_add(section);
dd81124b 2277
3b8e6a2d 2278 assert(size);
f6f3fbca 2279
3b8e6a2d 2280 addr = start_addr;
2999097b
AK
2281 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2282 section_index);
33417e70
FB
2283}
2284
0f0cb164
AK
2285void cpu_register_physical_memory_log(MemoryRegionSection *section,
2286 bool readonly)
2287{
2288 MemoryRegionSection now = *section, remain = *section;
2289
2290 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2291 || (now.size < TARGET_PAGE_SIZE)) {
2292 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2293 - now.offset_within_address_space,
2294 now.size);
2295 register_subpage(&now);
2296 remain.size -= now.size;
2297 remain.offset_within_address_space += now.size;
2298 remain.offset_within_region += now.size;
2299 }
69b67646
TH
2300 while (remain.size >= TARGET_PAGE_SIZE) {
2301 now = remain;
2302 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2303 now.size = TARGET_PAGE_SIZE;
2304 register_subpage(&now);
2305 } else {
2306 now.size &= TARGET_PAGE_MASK;
2307 register_multipage(&now);
2308 }
0f0cb164
AK
2309 remain.size -= now.size;
2310 remain.offset_within_address_space += now.size;
2311 remain.offset_within_region += now.size;
2312 }
2313 now = remain;
2314 if (now.size) {
2315 register_subpage(&now);
2316 }
2317}
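
/* Worked example (illustrative, 4K target pages): a section covering
 * guest-physical [0x1234, 0x3234) is registered above as a subpage for
 * [0x1234, 0x2000), one full page for [0x2000, 0x3000), and a subpage
 * for [0x3000, 0x3234), assuming offset_within_region shares the
 * alignment of offset_within_address_space. */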
2318
2319
c227f099 2320void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2321{
2322 if (kvm_enabled())
2323 kvm_coalesce_mmio_region(addr, size);
2324}
2325
c227f099 2326void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2327{
2328 if (kvm_enabled())
2329 kvm_uncoalesce_mmio_region(addr, size);
2330}
2331
62a2744c
SY
2332void qemu_flush_coalesced_mmio_buffer(void)
2333{
2334 if (kvm_enabled())
2335 kvm_flush_coalesced_mmio_buffer();
2336}
2337
c902760f
MT
2338#if defined(__linux__) && !defined(TARGET_S390X)
2339
2340#include <sys/vfs.h>
2341
2342#define HUGETLBFS_MAGIC 0x958458f6
2343
2344static long gethugepagesize(const char *path)
2345{
2346 struct statfs fs;
2347 int ret;
2348
2349 do {
9742bf26 2350 ret = statfs(path, &fs);
c902760f
MT
2351 } while (ret != 0 && errno == EINTR);
2352
2353 if (ret != 0) {
9742bf26
YT
2354 perror(path);
2355 return 0;
c902760f
MT
2356 }
2357
2358 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2359 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2360
2361 return fs.f_bsize;
2362}
2363
04b16653
AW
2364static void *file_ram_alloc(RAMBlock *block,
2365 ram_addr_t memory,
2366 const char *path)
c902760f
MT
2367{
2368 char *filename;
2369 void *area;
2370 int fd;
2371#ifdef MAP_POPULATE
2372 int flags;
2373#endif
2374 unsigned long hpagesize;
2375
2376 hpagesize = gethugepagesize(path);
2377 if (!hpagesize) {
9742bf26 2378 return NULL;
c902760f
MT
2379 }
2380
2381 if (memory < hpagesize) {
2382 return NULL;
2383 }
2384
2385 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2386 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2387 return NULL;
2388 }
2389
2390 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2391 return NULL;
c902760f
MT
2392 }
2393
2394 fd = mkstemp(filename);
2395 if (fd < 0) {
9742bf26
YT
2396 perror("unable to create backing store for hugepages");
2397 free(filename);
2398 return NULL;
c902760f
MT
2399 }
2400 unlink(filename);
2401 free(filename);
2402
2403 memory = (memory+hpagesize-1) & ~(hpagesize-1);
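    /* i.e. round up to a whole number of huge pages: with 2 MiB huge
       pages, a 129 MiB request becomes 130 MiB */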
2404
2405 /*
2406 * ftruncate is not supported by hugetlbfs in older
2407 * hosts, so don't bother bailing out on errors.
2408 * If anything goes wrong with it under other filesystems,
2409 * mmap will fail.
2410 */
2411 if (ftruncate(fd, memory))
9742bf26 2412 perror("ftruncate");
c902760f
MT
2413
2414#ifdef MAP_POPULATE
2415 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2416 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2417 * to sidestep this quirk.
2418 */
2419 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2420 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2421#else
2422 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2423#endif
2424 if (area == MAP_FAILED) {
9742bf26
YT
2425 perror("file_ram_alloc: can't mmap RAM pages");
2426 close(fd);
2427 return (NULL);
c902760f 2428 }
04b16653 2429 block->fd = fd;
c902760f
MT
2430 return area;
2431}
2432#endif
2433
d17b5288 2434static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2435{
2436 RAMBlock *block, *next_block;
3e837b2c 2437 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2438
2439 if (QLIST_EMPTY(&ram_list.blocks))
2440 return 0;
2441
2442 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2443 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2444
2445 end = block->offset + block->length;
2446
2447 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2448 if (next_block->offset >= end) {
2449 next = MIN(next, next_block->offset);
2450 }
2451 }
2452 if (next - end >= size && next - end < mingap) {
3e837b2c 2453 offset = end;
04b16653
AW
2454 mingap = next - end;
2455 }
2456 }
3e837b2c
AW
2457
2458 if (offset == RAM_ADDR_MAX) {
2459 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2460 (uint64_t)size);
2461 abort();
2462 }
2463
04b16653
AW
2464 return offset;
2465}
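
/* Worked example (illustrative): with blocks at [0, 1M) and [3M, 4M),
 * find_ram_offset(1M) sees a 2M gap at 1M and an effectively unbounded
 * gap at 4M; the best-fit rule picks the smaller gap and returns 1M. */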
2466
2467static ram_addr_t last_ram_offset(void)
d17b5288
AW
2468{
2469 RAMBlock *block;
2470 ram_addr_t last = 0;
2471
2472 QLIST_FOREACH(block, &ram_list.blocks, next)
2473 last = MAX(last, block->offset + block->length);
2474
2475 return last;
2476}
2477
ddb97f1d
JB
2478static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2479{
2480 int ret;
2481 QemuOpts *machine_opts;
2482
2483    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2484 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2485 if (machine_opts &&
2486 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2487 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2488 if (ret) {
2489 perror("qemu_madvise");
2490 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2491 "but dump_guest_core=off specified\n");
2492 }
2493 }
2494}
2495
c5705a77 2496void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2497{
2498 RAMBlock *new_block, *block;
2499
c5705a77
AK
2500 new_block = NULL;
2501 QLIST_FOREACH(block, &ram_list.blocks, next) {
2502 if (block->offset == addr) {
2503 new_block = block;
2504 break;
2505 }
2506 }
2507 assert(new_block);
2508 assert(!new_block->idstr[0]);
84b89d78 2509
09e5ab63
AL
2510 if (dev) {
2511 char *id = qdev_get_dev_path(dev);
84b89d78
CM
2512 if (id) {
2513 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2514 g_free(id);
84b89d78
CM
2515 }
2516 }
2517 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2518
2519 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2520 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2521 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2522 new_block->idstr);
2523 abort();
2524 }
2525 }
c5705a77
AK
2526}
2527
2528ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2529 MemoryRegion *mr)
2530{
2531 RAMBlock *new_block;
2532
2533 size = TARGET_PAGE_ALIGN(size);
2534 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2535
7c637366 2536 new_block->mr = mr;
432d268c 2537 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2538 if (host) {
2539 new_block->host = host;
cd19cfa2 2540 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2541 } else {
2542 if (mem_path) {
c902760f 2543#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2544 new_block->host = file_ram_alloc(new_block, size, mem_path);
2545 if (!new_block->host) {
2546 new_block->host = qemu_vmalloc(size);
e78815a5 2547 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2548 }
c902760f 2549#else
6977dfe6
YT
2550 fprintf(stderr, "-mem-path option unsupported\n");
2551 exit(1);
c902760f 2552#endif
6977dfe6 2553 } else {
868bb33f 2554 if (xen_enabled()) {
fce537d4 2555 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
2556 } else if (kvm_enabled()) {
2557 /* some s390/kvm configurations have special constraints */
2558 new_block->host = kvm_vmalloc(size);
432d268c
JN
2559 } else {
2560 new_block->host = qemu_vmalloc(size);
2561 }
e78815a5 2562 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2563 }
c902760f 2564 }
94a6b54f
PB
2565 new_block->length = size;
2566
f471a17e 2567 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2568
7267c094 2569 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2570 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
2571 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2572 0, size >> TARGET_PAGE_BITS);
1720aeee 2573 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 2574
ddb97f1d
JB
2575 qemu_ram_setup_dump(new_block->host, size);
2576
6f0437e8
JK
2577 if (kvm_enabled())
2578 kvm_setup_guest_memory(new_block->host, size);
2579
94a6b54f
PB
2580 return new_block->offset;
2581}
e9a1ab19 2582
c5705a77 2583ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2584{
c5705a77 2585 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2586}
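
/* Illustrative sketch: qemu_ram_alloc() is normally reached through the
 * memory API rather than called directly. A device model would do
 * something like this (region name and guest address are made up): */
static void example_init_vram(MemoryRegion *vram)
{
    /* ends up in qemu_ram_alloc(size, vram) */
    memory_region_init_ram(vram, "example-vram", 8 * 1024 * 1024);
    /* map it into the guest at a hypothetical physical address */
    memory_region_add_subregion(get_system_memory(), 0xa0000000, vram);
}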
2587
1f2e98b6
AW
2588void qemu_ram_free_from_ptr(ram_addr_t addr)
2589{
2590 RAMBlock *block;
2591
2592 QLIST_FOREACH(block, &ram_list.blocks, next) {
2593 if (addr == block->offset) {
2594 QLIST_REMOVE(block, next);
7267c094 2595 g_free(block);
1f2e98b6
AW
2596 return;
2597 }
2598 }
2599}
2600
c227f099 2601void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2602{
04b16653
AW
2603 RAMBlock *block;
2604
2605 QLIST_FOREACH(block, &ram_list.blocks, next) {
2606 if (addr == block->offset) {
2607 QLIST_REMOVE(block, next);
cd19cfa2
HY
2608 if (block->flags & RAM_PREALLOC_MASK) {
2609 ;
2610 } else if (mem_path) {
04b16653
AW
2611#if defined (__linux__) && !defined(TARGET_S390X)
2612 if (block->fd) {
2613 munmap(block->host, block->length);
2614 close(block->fd);
2615 } else {
2616 qemu_vfree(block->host);
2617 }
fd28aa13
JK
2618#else
2619 abort();
04b16653
AW
2620#endif
2621 } else {
2622#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2623 munmap(block->host, block->length);
2624#else
868bb33f 2625 if (xen_enabled()) {
e41d7c69 2626 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2627 } else {
2628 qemu_vfree(block->host);
2629 }
04b16653
AW
2630#endif
2631 }
7267c094 2632 g_free(block);
04b16653
AW
2633 return;
2634 }
2635 }
2636
e9a1ab19
FB
2637}
2638
cd19cfa2
HY
2639#ifndef _WIN32
2640void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2641{
2642 RAMBlock *block;
2643 ram_addr_t offset;
2644 int flags;
2645 void *area, *vaddr;
2646
2647 QLIST_FOREACH(block, &ram_list.blocks, next) {
2648 offset = addr - block->offset;
2649 if (offset < block->length) {
2650 vaddr = block->host + offset;
2651 if (block->flags & RAM_PREALLOC_MASK) {
2652 ;
2653 } else {
2654 flags = MAP_FIXED;
2655 munmap(vaddr, length);
2656 if (mem_path) {
2657#if defined(__linux__) && !defined(TARGET_S390X)
2658 if (block->fd) {
2659#ifdef MAP_POPULATE
2660 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2661 MAP_PRIVATE;
2662#else
2663 flags |= MAP_PRIVATE;
2664#endif
2665 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2666 flags, block->fd, offset);
2667 } else {
2668 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2669 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2670 flags, -1, 0);
2671 }
fd28aa13
JK
2672#else
2673 abort();
cd19cfa2
HY
2674#endif
2675 } else {
2676#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2677 flags |= MAP_SHARED | MAP_ANONYMOUS;
2678 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2679 flags, -1, 0);
2680#else
2681 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2682 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2683 flags, -1, 0);
2684#endif
2685 }
2686 if (area != vaddr) {
f15fbc4b
AP
2687 fprintf(stderr, "Could not remap addr: "
2688 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2689 length, addr);
2690 exit(1);
2691 }
2692 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
ddb97f1d 2693 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
2694 }
2695 return;
2696 }
2697 }
2698}
2699#endif /* !_WIN32 */
2700
dc828ca1 2701/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2702 With the exception of the softmmu code in this file, this should
2703 only be used for local memory (e.g. video ram) that the device owns,
2704 and knows it isn't going to access beyond the end of the block.
2705
2706 It should not be used for general purpose DMA.
2707 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2708 */
c227f099 2709void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2710{
94a6b54f
PB
2711 RAMBlock *block;
2712
f471a17e
AW
2713 QLIST_FOREACH(block, &ram_list.blocks, next) {
2714 if (addr - block->offset < block->length) {
7d82af38
VP
2715            /* Move this entry to the start of the list. */
2716 if (block != QLIST_FIRST(&ram_list.blocks)) {
2717 QLIST_REMOVE(block, next);
2718 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2719 }
868bb33f 2720 if (xen_enabled()) {
432d268c
JN
2721 /* We need to check if the requested address is in the RAM
2722 * because we don't want to map the entire memory in QEMU.
712c2b41 2723 * In that case just map until the end of the page.
432d268c
JN
2724 */
2725 if (block->offset == 0) {
e41d7c69 2726 return xen_map_cache(addr, 0, 0);
432d268c 2727 } else if (block->host == NULL) {
e41d7c69
JK
2728 block->host =
2729 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2730 }
2731 }
f471a17e
AW
2732 return block->host + (addr - block->offset);
2733 }
94a6b54f 2734 }
f471a17e
AW
2735
2736 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2737 abort();
2738
2739 return NULL;
dc828ca1
PB
2740}
2741
b2e0a138
MT
2742/* Return a host pointer to ram allocated with qemu_ram_alloc.
2743 * Same as qemu_get_ram_ptr, but avoids reordering the ramblock list.
2744 */
2745void *qemu_safe_ram_ptr(ram_addr_t addr)
2746{
2747 RAMBlock *block;
2748
2749 QLIST_FOREACH(block, &ram_list.blocks, next) {
2750 if (addr - block->offset < block->length) {
868bb33f 2751 if (xen_enabled()) {
432d268c
JN
2752 /* We need to check if the requested address is in the RAM
2753 * because we don't want to map the entire memory in QEMU.
712c2b41 2754 * In that case just map until the end of the page.
432d268c
JN
2755 */
2756 if (block->offset == 0) {
e41d7c69 2757 return xen_map_cache(addr, 0, 0);
432d268c 2758 } else if (block->host == NULL) {
e41d7c69
JK
2759 block->host =
2760 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2761 }
2762 }
b2e0a138
MT
2763 return block->host + (addr - block->offset);
2764 }
2765 }
2766
2767 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2768 abort();
2769
2770 return NULL;
2771}
2772
38bee5dc
SS
2773/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2774 * but takes a size argument */
8ab934f9 2775void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2776{
8ab934f9
SS
2777 if (*size == 0) {
2778 return NULL;
2779 }
868bb33f 2780 if (xen_enabled()) {
e41d7c69 2781 return xen_map_cache(addr, *size, 1);
868bb33f 2782 } else {
38bee5dc
SS
2783 RAMBlock *block;
2784
2785 QLIST_FOREACH(block, &ram_list.blocks, next) {
2786 if (addr - block->offset < block->length) {
2787 if (addr - block->offset + *size > block->length)
2788 *size = block->length - addr + block->offset;
2789 return block->host + (addr - block->offset);
2790 }
2791 }
2792
2793 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2794 abort();
38bee5dc
SS
2795 }
2796}
2797
050a0ddf
AP
2798void qemu_put_ram_ptr(void *addr)
2799{
2800 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2801}
2802
e890261f 2803int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2804{
94a6b54f
PB
2805 RAMBlock *block;
2806 uint8_t *host = ptr;
2807
868bb33f 2808 if (xen_enabled()) {
e41d7c69 2809 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2810 return 0;
2811 }
2812
f471a17e 2813 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
2814        /* This case happens when the block is not mapped. */
2815 if (block->host == NULL) {
2816 continue;
2817 }
f471a17e 2818 if (host - block->host < block->length) {
e890261f
MT
2819 *ram_addr = block->offset + (host - block->host);
2820 return 0;
f471a17e 2821 }
94a6b54f 2822 }
432d268c 2823
e890261f
MT
2824 return -1;
2825}
f471a17e 2826
e890261f
MT
2827/* Some of the softmmu routines need to translate from a host pointer
2828 (typically a TLB entry) back to a ram offset. */
2829ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2830{
2831 ram_addr_t ram_addr;
f471a17e 2832
e890261f
MT
2833 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2834 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2835 abort();
2836 }
2837 return ram_addr;
5579c7f3
PB
2838}
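
/* Illustrative sketch: for any offset inside a mapped block the two
 * translations above are inverses (Xen's lazy mapping aside): */
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);

    assert(qemu_ram_addr_from_host_nofail(host) == addr);
}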
2839
0e0df1e2
AK
2840static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2841 unsigned size)
e18231a3
BS
2842{
2843#ifdef DEBUG_UNASSIGNED
2844 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2845#endif
5b450407 2846#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2847 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2848#endif
2849 return 0;
2850}
2851
0e0df1e2
AK
2852static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2853 uint64_t val, unsigned size)
e18231a3
BS
2854{
2855#ifdef DEBUG_UNASSIGNED
0e0df1e2 2856 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2857#endif
5b450407 2858#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2859 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2860#endif
33417e70
FB
2861}
2862
0e0df1e2
AK
2863static const MemoryRegionOps unassigned_mem_ops = {
2864 .read = unassigned_mem_read,
2865 .write = unassigned_mem_write,
2866 .endianness = DEVICE_NATIVE_ENDIAN,
2867};
e18231a3 2868
0e0df1e2
AK
2869static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2870 unsigned size)
e18231a3 2871{
0e0df1e2 2872 abort();
e18231a3
BS
2873}
2874
0e0df1e2
AK
2875static void error_mem_write(void *opaque, target_phys_addr_t addr,
2876 uint64_t value, unsigned size)
e18231a3 2877{
0e0df1e2 2878 abort();
33417e70
FB
2879}
2880
0e0df1e2
AK
2881static const MemoryRegionOps error_mem_ops = {
2882 .read = error_mem_read,
2883 .write = error_mem_write,
2884 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2885};
2886
0e0df1e2
AK
2887static const MemoryRegionOps rom_mem_ops = {
2888 .read = error_mem_read,
2889 .write = unassigned_mem_write,
2890 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2891};
2892
0e0df1e2
AK
2893static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2894 uint64_t val, unsigned size)
9fa3e853 2895{
3a7d929e 2896 int dirty_flags;
f7c11b53 2897 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2898 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2899#if !defined(CONFIG_USER_ONLY)
0e0df1e2 2900 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 2901 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2902#endif
3a7d929e 2903 }
0e0df1e2
AK
2904 switch (size) {
2905 case 1:
2906 stb_p(qemu_get_ram_ptr(ram_addr), val);
2907 break;
2908 case 2:
2909 stw_p(qemu_get_ram_ptr(ram_addr), val);
2910 break;
2911 case 4:
2912 stl_p(qemu_get_ram_ptr(ram_addr), val);
2913 break;
2914 default:
2915 abort();
3a7d929e 2916 }
f23db169 2917 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2918 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2919 /* we remove the notdirty callback only if the code has been
2920 flushed */
2921 if (dirty_flags == 0xff)
2e70f6ef 2922 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2923}
2924
0e0df1e2
AK
2925static const MemoryRegionOps notdirty_mem_ops = {
2926 .read = error_mem_read,
2927 .write = notdirty_mem_write,
2928 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
2929};
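
/* Flow summary (illustrative): the TLB routes stores to not-fully-dirty
 * pages through notdirty_mem_write() above. If the page still holds
 * translated code, its TBs are invalidated first; the store is then
 * performed and the page's dirty bits set. Once every bit (including
 * CODE_DIRTY_FLAG) is set, the slow path is disarmed via
 * tlb_set_dirty(). */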
2930
0f459d16 2931/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2932static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 2933{
9349b4f9 2934 CPUArchState *env = cpu_single_env;
06d55cc1
AL
2935 target_ulong pc, cs_base;
2936 TranslationBlock *tb;
0f459d16 2937 target_ulong vaddr;
a1d1bb31 2938 CPUWatchpoint *wp;
06d55cc1 2939 int cpu_flags;
0f459d16 2940
06d55cc1
AL
2941 if (env->watchpoint_hit) {
2942 /* We re-entered the check after replacing the TB. Now raise
2943         * the debug interrupt so that it will trigger after the
2944 * current instruction. */
2945 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2946 return;
2947 }
2e70f6ef 2948 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2949 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2950 if ((vaddr == (wp->vaddr & len_mask) ||
2951 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2952 wp->flags |= BP_WATCHPOINT_HIT;
2953 if (!env->watchpoint_hit) {
2954 env->watchpoint_hit = wp;
2955 tb = tb_find_pc(env->mem_io_pc);
2956 if (!tb) {
2957 cpu_abort(env, "check_watchpoint: could not find TB for "
2958 "pc=%p", (void *)env->mem_io_pc);
2959 }
618ba8e6 2960 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
2961 tb_phys_invalidate(tb, -1);
2962 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2963 env->exception_index = EXCP_DEBUG;
488d6577 2964 cpu_loop_exit(env);
6e140f28
AL
2965 } else {
2966 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2967 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 2968 cpu_resume_from_signal(env, NULL);
6e140f28 2969 }
06d55cc1 2970 }
6e140f28
AL
2971 } else {
2972 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2973 }
2974 }
2975}
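
/* Illustrative sketch: the path that arms the routines below. A
 * debugger inserts a watchpoint; the TLB then forces accesses to that
 * page through watch_mem_read/write, which call check_watchpoint().
 * The helper name is hypothetical; vaddr is assumed 4-byte aligned and
 * TARGET_HAS_ICE is assumed to be defined. */
static int example_watch_guest_var(CPUArchState *env, target_ulong vaddr)
{
    return cpu_watchpoint_insert(env, vaddr, 4,
                                 BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS,
                                 NULL);
}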
2976
6658ffb8
PB
2977/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2978 so these check for a hit then pass through to the normal out-of-line
2979 phys routines. */
1ec9b909
AK
2980static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2981 unsigned size)
6658ffb8 2982{
1ec9b909
AK
2983 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2984 switch (size) {
2985 case 1: return ldub_phys(addr);
2986 case 2: return lduw_phys(addr);
2987 case 4: return ldl_phys(addr);
2988 default: abort();
2989 }
6658ffb8
PB
2990}
2991
1ec9b909
AK
2992static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2993 uint64_t val, unsigned size)
6658ffb8 2994{
1ec9b909
AK
2995 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2996 switch (size) {
67364150
MF
2997 case 1:
2998 stb_phys(addr, val);
2999 break;
3000 case 2:
3001 stw_phys(addr, val);
3002 break;
3003 case 4:
3004 stl_phys(addr, val);
3005 break;
1ec9b909
AK
3006 default: abort();
3007 }
6658ffb8
PB
3008}
3009
1ec9b909
AK
3010static const MemoryRegionOps watch_mem_ops = {
3011 .read = watch_mem_read,
3012 .write = watch_mem_write,
3013 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3014};
6658ffb8 3015
70c68e44
AK
3016static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3017 unsigned len)
db7b5426 3018{
70c68e44 3019 subpage_t *mmio = opaque;
f6405247 3020 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3021 MemoryRegionSection *section;
db7b5426
BS
3022#if defined(DEBUG_SUBPAGE)
3023 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3024 mmio, len, addr, idx);
3025#endif
db7b5426 3026
5312bd8b
AK
3027 section = &phys_sections[mmio->sub_section[idx]];
3028 addr += mmio->base;
3029 addr -= section->offset_within_address_space;
3030 addr += section->offset_within_region;
37ec01d4 3031 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3032}
3033
70c68e44
AK
3034static void subpage_write(void *opaque, target_phys_addr_t addr,
3035 uint64_t value, unsigned len)
db7b5426 3036{
70c68e44 3037 subpage_t *mmio = opaque;
f6405247 3038 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3039 MemoryRegionSection *section;
db7b5426 3040#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3041 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3042 " idx %d value %"PRIx64"\n",
f6405247 3043 __func__, mmio, len, addr, idx, value);
db7b5426 3044#endif
f6405247 3045
5312bd8b
AK
3046 section = &phys_sections[mmio->sub_section[idx]];
3047 addr += mmio->base;
3048 addr -= section->offset_within_address_space;
3049 addr += section->offset_within_region;
37ec01d4 3050 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3051}
3052
70c68e44
AK
3053static const MemoryRegionOps subpage_ops = {
3054 .read = subpage_read,
3055 .write = subpage_write,
3056 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3057};
3058
de712f94
AK
3059static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3060 unsigned size)
56384e8b
AF
3061{
3062 ram_addr_t raddr = addr;
3063 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3064 switch (size) {
3065 case 1: return ldub_p(ptr);
3066 case 2: return lduw_p(ptr);
3067 case 4: return ldl_p(ptr);
3068 default: abort();
3069 }
56384e8b
AF
3070}
3071
de712f94
AK
3072static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3073 uint64_t value, unsigned size)
56384e8b
AF
3074{
3075 ram_addr_t raddr = addr;
3076 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3077 switch (size) {
3078 case 1: return stb_p(ptr, value);
3079 case 2: return stw_p(ptr, value);
3080 case 4: return stl_p(ptr, value);
3081 default: abort();
3082 }
56384e8b
AF
3083}
3084
de712f94
AK
3085static const MemoryRegionOps subpage_ram_ops = {
3086 .read = subpage_ram_read,
3087 .write = subpage_ram_write,
3088 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3089};
3090
c227f099 3091static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3092 uint16_t section)
db7b5426
BS
3093{
3094 int idx, eidx;
3095
3096 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3097 return -1;
3098 idx = SUBPAGE_IDX(start);
3099 eidx = SUBPAGE_IDX(end);
3100#if defined(DEBUG_SUBPAGE)
0bf9e31a 3101    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3102           mmio, start, end, idx, eidx, section);
3103#endif
5312bd8b
AK
3104 if (memory_region_is_ram(phys_sections[section].mr)) {
3105 MemoryRegionSection new_section = phys_sections[section];
3106 new_section.mr = &io_mem_subpage_ram;
3107 section = phys_section_add(&new_section);
56384e8b 3108 }
db7b5426 3109 for (; idx <= eidx; idx++) {
5312bd8b 3110 mmio->sub_section[idx] = section;
db7b5426
BS
3111 }
3112
3113 return 0;
3114}
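
/* Worked example (illustrative): on a 4K page whose first 0x100 bytes
 * are MMIO and whose remainder is RAM, the registrations above leave
 * sub_section[0x000..0x0ff] holding the MMIO section index and
 * sub_section[0x100..0xfff] the (subpage_ram) RAM section index. */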
3115
0f0cb164 3116static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3117{
c227f099 3118 subpage_t *mmio;
db7b5426 3119
7267c094 3120 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3121
3122 mmio->base = base;
70c68e44
AK
3123 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3124 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3125 mmio->iomem.subpage = true;
db7b5426 3126#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3127    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3128           mmio, base, TARGET_PAGE_SIZE);
db7b5426 3129#endif
0f0cb164 3130 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3131
3132 return mmio;
3133}
3134
5312bd8b
AK
3135static uint16_t dummy_section(MemoryRegion *mr)
3136{
3137 MemoryRegionSection section = {
3138 .mr = mr,
3139 .offset_within_address_space = 0,
3140 .offset_within_region = 0,
3141 .size = UINT64_MAX,
3142 };
3143
3144 return phys_section_add(&section);
3145}
3146
37ec01d4 3147MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3148{
37ec01d4 3149 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3150}
3151
e9179ce1
AK
3152static void io_mem_init(void)
3153{
0e0df1e2 3154 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3155 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3156 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3157 "unassigned", UINT64_MAX);
3158 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3159 "notdirty", UINT64_MAX);
de712f94
AK
3160 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3161 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3162 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3163 "watch", UINT64_MAX);
e9179ce1
AK
3164}
3165
50c1e149
AK
3166static void core_begin(MemoryListener *listener)
3167{
54688b1e 3168 destroy_all_mappings();
5312bd8b 3169 phys_sections_clear();
c19e8800 3170 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3171 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3172 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3173 phys_section_rom = dummy_section(&io_mem_rom);
3174 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3175}
3176
3177static void core_commit(MemoryListener *listener)
3178{
9349b4f9 3179 CPUArchState *env;
117712c3
AK
3180
3181 /* since each CPU stores ram addresses in its TLB cache, we must
3182 reset the modified entries */
3183 /* XXX: slow ! */
3184 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3185 tlb_flush(env, 1);
3186 }
50c1e149
AK
3187}
3188
93632747
AK
3189static void core_region_add(MemoryListener *listener,
3190 MemoryRegionSection *section)
3191{
4855d41a 3192 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3193}
3194
3195static void core_region_del(MemoryListener *listener,
3196 MemoryRegionSection *section)
3197{
93632747
AK
3198}
3199
50c1e149
AK
3200static void core_region_nop(MemoryListener *listener,
3201 MemoryRegionSection *section)
3202{
54688b1e 3203 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3204}
3205
93632747
AK
3206static void core_log_start(MemoryListener *listener,
3207 MemoryRegionSection *section)
3208{
3209}
3210
3211static void core_log_stop(MemoryListener *listener,
3212 MemoryRegionSection *section)
3213{
3214}
3215
3216static void core_log_sync(MemoryListener *listener,
3217 MemoryRegionSection *section)
3218{
3219}
3220
3221static void core_log_global_start(MemoryListener *listener)
3222{
3223 cpu_physical_memory_set_dirty_tracking(1);
3224}
3225
3226static void core_log_global_stop(MemoryListener *listener)
3227{
3228 cpu_physical_memory_set_dirty_tracking(0);
3229}
3230
3231static void core_eventfd_add(MemoryListener *listener,
3232 MemoryRegionSection *section,
753d5e14 3233 bool match_data, uint64_t data, EventNotifier *e)
93632747
AK
3234{
3235}
3236
3237static void core_eventfd_del(MemoryListener *listener,
3238 MemoryRegionSection *section,
753d5e14 3239 bool match_data, uint64_t data, EventNotifier *e)
93632747
AK
3240{
3241}
3242
50c1e149
AK
3243static void io_begin(MemoryListener *listener)
3244{
3245}
3246
3247static void io_commit(MemoryListener *listener)
3248{
3249}
3250
4855d41a
AK
3251static void io_region_add(MemoryListener *listener,
3252 MemoryRegionSection *section)
3253{
a2d33521
AK
3254 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3255
3256 mrio->mr = section->mr;
3257 mrio->offset = section->offset_within_region;
3258 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3259 section->offset_within_address_space, section->size);
a2d33521 3260 ioport_register(&mrio->iorange);
4855d41a
AK
3261}
3262
3263static void io_region_del(MemoryListener *listener,
3264 MemoryRegionSection *section)
3265{
3266 isa_unassign_ioport(section->offset_within_address_space, section->size);
3267}
3268
50c1e149
AK
3269static void io_region_nop(MemoryListener *listener,
3270 MemoryRegionSection *section)
3271{
3272}
3273
4855d41a
AK
3274static void io_log_start(MemoryListener *listener,
3275 MemoryRegionSection *section)
3276{
3277}
3278
3279static void io_log_stop(MemoryListener *listener,
3280 MemoryRegionSection *section)
3281{
3282}
3283
3284static void io_log_sync(MemoryListener *listener,
3285 MemoryRegionSection *section)
3286{
3287}
3288
3289static void io_log_global_start(MemoryListener *listener)
3290{
3291}
3292
3293static void io_log_global_stop(MemoryListener *listener)
3294{
3295}
3296
3297static void io_eventfd_add(MemoryListener *listener,
3298 MemoryRegionSection *section,
753d5e14 3299 bool match_data, uint64_t data, EventNotifier *e)
4855d41a
AK
3300{
3301}
3302
3303static void io_eventfd_del(MemoryListener *listener,
3304 MemoryRegionSection *section,
753d5e14 3305 bool match_data, uint64_t data, EventNotifier *e)
4855d41a
AK
3306{
3307}
3308
93632747 3309static MemoryListener core_memory_listener = {
50c1e149
AK
3310 .begin = core_begin,
3311 .commit = core_commit,
93632747
AK
3312 .region_add = core_region_add,
3313 .region_del = core_region_del,
50c1e149 3314 .region_nop = core_region_nop,
93632747
AK
3315 .log_start = core_log_start,
3316 .log_stop = core_log_stop,
3317 .log_sync = core_log_sync,
3318 .log_global_start = core_log_global_start,
3319 .log_global_stop = core_log_global_stop,
3320 .eventfd_add = core_eventfd_add,
3321 .eventfd_del = core_eventfd_del,
3322 .priority = 0,
3323};
3324
4855d41a 3325static MemoryListener io_memory_listener = {
50c1e149
AK
3326 .begin = io_begin,
3327 .commit = io_commit,
4855d41a
AK
3328 .region_add = io_region_add,
3329 .region_del = io_region_del,
50c1e149 3330 .region_nop = io_region_nop,
4855d41a
AK
3331 .log_start = io_log_start,
3332 .log_stop = io_log_stop,
3333 .log_sync = io_log_sync,
3334 .log_global_start = io_log_global_start,
3335 .log_global_stop = io_log_global_stop,
3336 .eventfd_add = io_eventfd_add,
3337 .eventfd_del = io_eventfd_del,
3338 .priority = 0,
3339};
3340
62152b8a
AK
3341static void memory_map_init(void)
3342{
7267c094 3343 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3344 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3345 set_system_memory_map(system_memory);
309cb471 3346
7267c094 3347 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3348 memory_region_init(system_io, "io", 65536);
3349 set_system_io_map(system_io);
93632747 3350
4855d41a
AK
3351 memory_listener_register(&core_memory_listener, system_memory);
3352 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3353}
3354
3355MemoryRegion *get_system_memory(void)
3356{
3357 return system_memory;
3358}
3359
309cb471
AK
3360MemoryRegion *get_system_io(void)
3361{
3362 return system_io;
3363}
3364
e2eef170
PB
3365#endif /* !defined(CONFIG_USER_ONLY) */
3366
13eb76e0
FB
3367/* physical memory access (slow version, mainly for debug) */
3368#if defined(CONFIG_USER_ONLY)
9349b4f9 3369int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3370 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3371{
3372 int l, flags;
3373 target_ulong page;
53a5960a 3374 void * p;
13eb76e0
FB
3375
3376 while (len > 0) {
3377 page = addr & TARGET_PAGE_MASK;
3378 l = (page + TARGET_PAGE_SIZE) - addr;
3379 if (l > len)
3380 l = len;
3381 flags = page_get_flags(page);
3382 if (!(flags & PAGE_VALID))
a68fe89c 3383 return -1;
13eb76e0
FB
3384 if (is_write) {
3385 if (!(flags & PAGE_WRITE))
a68fe89c 3386 return -1;
579a97f7 3387 /* XXX: this code should not depend on lock_user */
72fb7daa 3388 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3389 return -1;
72fb7daa
AJ
3390 memcpy(p, buf, l);
3391 unlock_user(p, addr, l);
13eb76e0
FB
3392 } else {
3393 if (!(flags & PAGE_READ))
a68fe89c 3394 return -1;
579a97f7 3395 /* XXX: this code should not depend on lock_user */
72fb7daa 3396 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3397 return -1;
72fb7daa 3398 memcpy(buf, p, l);
5b257578 3399 unlock_user(p, addr, 0);
13eb76e0
FB
3400 }
3401 len -= l;
3402 buf += l;
3403 addr += l;
3404 }
a68fe89c 3405 return 0;
13eb76e0 3406}
8df1cd07 3407
13eb76e0 3408#else
c227f099 3409void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3410 int len, int is_write)
3411{
37ec01d4 3412 int l;
13eb76e0
FB
3413 uint8_t *ptr;
3414 uint32_t val;
c227f099 3415 target_phys_addr_t page;
f3705d53 3416 MemoryRegionSection *section;
3b46e624 3417
13eb76e0
FB
3418 while (len > 0) {
3419 page = addr & TARGET_PAGE_MASK;
3420 l = (page + TARGET_PAGE_SIZE) - addr;
3421 if (l > len)
3422 l = len;
06ef3525 3423 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3424
13eb76e0 3425 if (is_write) {
f3705d53 3426 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3427 target_phys_addr_t addr1;
cc5bea60 3428 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3429 /* XXX: could force cpu_single_env to NULL to avoid
3430 potential bugs */
6c2934db 3431 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3432 /* 32 bit write access */
c27004ec 3433 val = ldl_p(buf);
37ec01d4 3434 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3435 l = 4;
6c2934db 3436 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3437 /* 16 bit write access */
c27004ec 3438 val = lduw_p(buf);
37ec01d4 3439 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3440 l = 2;
3441 } else {
1c213d19 3442 /* 8 bit write access */
c27004ec 3443 val = ldub_p(buf);
37ec01d4 3444 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3445 l = 1;
3446 }
f3705d53 3447 } else if (!section->readonly) {
8ca5692d 3448 ram_addr_t addr1;
f3705d53 3449 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3450 + memory_region_section_addr(section, addr);
13eb76e0 3451 /* RAM case */
5579c7f3 3452 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3453 memcpy(ptr, buf, l);
3a7d929e
FB
3454 if (!cpu_physical_memory_is_dirty(addr1)) {
3455 /* invalidate code */
3456 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3457 /* set dirty bit */
f7c11b53
YT
3458 cpu_physical_memory_set_dirty_flags(
3459 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3460 }
050a0ddf 3461 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3462 }
3463 } else {
cc5bea60
BS
3464 if (!(memory_region_is_ram(section->mr) ||
3465 memory_region_is_romd(section->mr))) {
f1f6e3b8 3466 target_phys_addr_t addr1;
13eb76e0 3467 /* I/O case */
cc5bea60 3468 addr1 = memory_region_section_addr(section, addr);
6c2934db 3469 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3470 /* 32 bit read access */
37ec01d4 3471 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3472 stl_p(buf, val);
13eb76e0 3473 l = 4;
6c2934db 3474 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3475 /* 16 bit read access */
37ec01d4 3476 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3477 stw_p(buf, val);
13eb76e0
FB
3478 l = 2;
3479 } else {
1c213d19 3480 /* 8 bit read access */
37ec01d4 3481 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3482 stb_p(buf, val);
13eb76e0
FB
3483 l = 1;
3484 }
3485 } else {
3486 /* RAM case */
0a1b357f 3487 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3488 + memory_region_section_addr(section,
3489 addr));
f3705d53 3490 memcpy(buf, ptr, l);
050a0ddf 3491 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3492 }
3493 }
3494 len -= l;
3495 buf += l;
3496 addr += l;
3497 }
3498}
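/* Example (editor's sketch): a device model pulling a guest-built
 * command block into host memory with the function above. "MyDevCmd",
 * "mydev_fetch_cmd" and "cmd_paddr" are hypothetical names, not part
 * of this file; only the cpu_physical_memory_rw() call is real. */
typedef struct MyDevCmd {
    uint32_t opcode;
    uint32_t arg;
} MyDevCmd;

static void mydev_fetch_cmd(target_phys_addr_t cmd_paddr, MyDevCmd *cmd)
{
    /* is_write == 0: copy from guest physical memory into *cmd */
    cpu_physical_memory_rw(cmd_paddr, (uint8_t *)cmd, sizeof(*cmd), 0);
}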
8df1cd07 3499
d0ecd2aa 3500/* used for ROM loading: can write to both RAM and ROM */
c227f099 3501void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3502 const uint8_t *buf, int len)
3503{
3504 int l;
3505 uint8_t *ptr;
c227f099 3506 target_phys_addr_t page;
f3705d53 3507 MemoryRegionSection *section;
3b46e624 3508
d0ecd2aa
FB
3509 while (len > 0) {
3510 page = addr & TARGET_PAGE_MASK;
3511 l = (page + TARGET_PAGE_SIZE) - addr;
3512 if (l > len)
3513 l = len;
06ef3525 3514 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3515
cc5bea60
BS
3516 if (!(memory_region_is_ram(section->mr) ||
3517 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3518 /* do nothing */
3519 } else {
3520 unsigned long addr1;
f3705d53 3521 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3522 + memory_region_section_addr(section, addr);
d0ecd2aa 3523 /* ROM/RAM case */
5579c7f3 3524 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3525 memcpy(ptr, buf, l);
050a0ddf 3526 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3527 }
3528 len -= l;
3529 buf += l;
3530 addr += l;
3531 }
3532}
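/* Example (editor's sketch): loading a firmware image into a ROM
 * region at machine init. cpu_physical_memory_rw() skips writes to
 * read-only RAM, so ROM contents have to go through this helper.
 * "MYDEV_ROM_BASE", "blob" and "blob_size" are hypothetical. */
#define MYDEV_ROM_BASE 0xfffc0000

static void mydev_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(MYDEV_ROM_BASE, blob, blob_size);
}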
3533
6d16c2f8
AL
3534typedef struct {
3535 void *buffer;
c227f099
AL
3536 target_phys_addr_t addr;
3537 target_phys_addr_t len;
6d16c2f8
AL
3538} BounceBuffer;
3539
3540static BounceBuffer bounce;
3541
ba223c29
AL
3542typedef struct MapClient {
3543 void *opaque;
3544 void (*callback)(void *opaque);
72cf2d4f 3545 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3546} MapClient;
3547
72cf2d4f
BS
3548static QLIST_HEAD(map_client_list, MapClient) map_client_list
3549 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3550
3551void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3552{
7267c094 3553 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3554
3555 client->opaque = opaque;
3556 client->callback = callback;
72cf2d4f 3557 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3558 return client;
3559}
3560
3561void cpu_unregister_map_client(void *_client)
3562{
3563 MapClient *client = (MapClient *)_client;
3564
72cf2d4f 3565 QLIST_REMOVE(client, link);
7267c094 3566 g_free(client);
ba223c29
AL
3567}
3568
3569static void cpu_notify_map_clients(void)
3570{
3571 MapClient *client;
3572
72cf2d4f
BS
3573 while (!QLIST_EMPTY(&map_client_list)) {
3574 client = QLIST_FIRST(&map_client_list);
ba223c29 3575 client->callback(client->opaque);
34d5e948 3576 cpu_unregister_map_client(client);
ba223c29
AL
3577 }
3578}
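/* Example (editor's sketch): how a DMA engine parks itself until the
 * bounce buffer frees up. cpu_notify_map_clients() runs each callback
 * once and unregisters it, so a callback whose retry fails must
 * re-register itself. "MyDevState" and both functions below are
 * hypothetical. */
typedef struct MyDevState MyDevState;

static void mydev_dma_retry(void *opaque)
{
    MyDevState *s = opaque;
    /* retry cpu_physical_memory_map(); if it fails again, re-register:
     * cpu_register_map_client(s, mydev_dma_retry); */
    (void)s;
}

static void mydev_dma_blocked(MyDevState *s)
{
    cpu_register_map_client(s, mydev_dma_retry);
}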
3579
6d16c2f8
AL
3580/* Map a physical memory region into a host virtual address.
3581 * May map a subset of the requested range, given by and returned in *plen.
3582 * May return NULL if resources needed to perform the mapping are exhausted.
3583 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3584 * Use cpu_register_map_client() to know when retrying the map operation is
3585 * likely to succeed.
6d16c2f8 3586 */
c227f099
AL
3587void *cpu_physical_memory_map(target_phys_addr_t addr,
3588 target_phys_addr_t *plen,
6d16c2f8
AL
3589 int is_write)
3590{
c227f099 3591 target_phys_addr_t len = *plen;
38bee5dc 3592 target_phys_addr_t todo = 0;
6d16c2f8 3593 int l;
c227f099 3594 target_phys_addr_t page;
f3705d53 3595 MemoryRegionSection *section;
f15fbc4b 3596 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3597 ram_addr_t rlen;
3598 void *ret;
6d16c2f8
AL
3599
3600 while (len > 0) {
3601 page = addr & TARGET_PAGE_MASK;
3602 l = (page + TARGET_PAGE_SIZE) - addr;
3603 if (l > len)
3604 l = len;
06ef3525 3605 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3606
f3705d53 3607 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3608 if (todo || bounce.buffer) {
6d16c2f8
AL
3609 break;
3610 }
3611 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3612 bounce.addr = addr;
3613 bounce.len = l;
3614 if (!is_write) {
54f7b4a3 3615 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3616 }
38bee5dc
SS
3617
3618 *plen = l;
3619 return bounce.buffer;
6d16c2f8 3620 }
8ab934f9 3621 if (!todo) {
f3705d53 3622 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3623 + memory_region_section_addr(section, addr);
8ab934f9 3624 }
6d16c2f8
AL
3625
3626 len -= l;
3627 addr += l;
38bee5dc 3628 todo += l;
6d16c2f8 3629 }
8ab934f9
SS
3630 rlen = todo;
3631 ret = qemu_ram_ptr_length(raddr, &rlen);
3632 *plen = rlen;
3633 return ret;
6d16c2f8
AL
3634}
3635
3636/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3637 * Will also mark the memory as dirty if is_write == 1. access_len gives
3638 * the amount of memory that was actually read or written by the caller.
3639 */
c227f099
AL
3640void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3641 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3642{
3643 if (buffer != bounce.buffer) {
3644 if (is_write) {
e890261f 3645 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3646 while (access_len) {
3647 unsigned l;
3648 l = TARGET_PAGE_SIZE;
3649 if (l > access_len)
3650 l = access_len;
3651 if (!cpu_physical_memory_is_dirty(addr1)) {
3652 /* invalidate code */
3653 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3654 /* set dirty bit */
f7c11b53
YT
3655 cpu_physical_memory_set_dirty_flags(
3656 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3657 }
3658 addr1 += l;
3659 access_len -= l;
3660 }
3661 }
868bb33f 3662 if (xen_enabled()) {
e41d7c69 3663 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3664 }
6d16c2f8
AL
3665 return;
3666 }
3667 if (is_write) {
3668 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3669 }
f8a83245 3670 qemu_vfree(bounce.buffer);
6d16c2f8 3671 bounce.buffer = NULL;
ba223c29 3672 cpu_notify_map_clients();
6d16c2f8 3673}
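/* Example (editor's sketch): zero-copy DMA with the map/unmap pair
 * above. The map may come back shorter than requested (via *plen) or
 * be NULL when the single bounce buffer is already busy; real callers
 * loop over the remainder and fall back to the map-client mechanism.
 * Names other than the two QEMU calls are hypothetical. */
static void mydev_dma_write_to_guest(target_phys_addr_t dma_addr,
                                     const uint8_t *data,
                                     target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1);

    if (!host) {
        return; /* bounce buffer busy: retry via a map client */
    }
    memcpy(host, data, plen);                 /* plen may be < dma_len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}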
d0ecd2aa 3674
8df1cd07 3675/* warning: addr must be aligned */
1e78bcc1
AG
3676static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3677 enum device_endian endian)
8df1cd07 3678{
8df1cd07
FB
3679 uint8_t *ptr;
3680 uint32_t val;
f3705d53 3681 MemoryRegionSection *section;
8df1cd07 3682
06ef3525 3683 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3684
cc5bea60
BS
3685 if (!(memory_region_is_ram(section->mr) ||
3686 memory_region_is_romd(section->mr))) {
8df1cd07 3687 /* I/O case */
cc5bea60 3688 addr = memory_region_section_addr(section, addr);
37ec01d4 3689 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3690#if defined(TARGET_WORDS_BIGENDIAN)
3691 if (endian == DEVICE_LITTLE_ENDIAN) {
3692 val = bswap32(val);
3693 }
3694#else
3695 if (endian == DEVICE_BIG_ENDIAN) {
3696 val = bswap32(val);
3697 }
3698#endif
8df1cd07
FB
3699 } else {
3700 /* RAM case */
f3705d53 3701 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3702 & TARGET_PAGE_MASK)
cc5bea60 3703 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3704 switch (endian) {
3705 case DEVICE_LITTLE_ENDIAN:
3706 val = ldl_le_p(ptr);
3707 break;
3708 case DEVICE_BIG_ENDIAN:
3709 val = ldl_be_p(ptr);
3710 break;
3711 default:
3712 val = ldl_p(ptr);
3713 break;
3714 }
8df1cd07
FB
3715 }
3716 return val;
3717}
3718
1e78bcc1
AG
3719uint32_t ldl_phys(target_phys_addr_t addr)
3720{
3721 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3722}
3723
3724uint32_t ldl_le_phys(target_phys_addr_t addr)
3725{
3726 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3727}
3728
3729uint32_t ldl_be_phys(target_phys_addr_t addr)
3730{
3731 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3732}
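/* Example (editor's sketch): reading a field that the device spec
 * defines as little-endian. ldl_le_phys() yields a host-order value
 * whatever TARGET_WORDS_BIGENDIAN says, where plain ldl_phys() is
 * only correct for guest-native layouts. "mydev_desc_len",
 * "desc_paddr" and the offset are hypothetical. */
static uint32_t mydev_desc_len(target_phys_addr_t desc_paddr)
{
    return ldl_le_phys(desc_paddr + 4); /* LE u32 at offset 4 */
}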
3733
84b7b8e7 3734/* warning: addr must be aligned */
1e78bcc1
AG
3735static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3736 enum device_endian endian)
84b7b8e7 3737{
84b7b8e7
FB
3738 uint8_t *ptr;
3739 uint64_t val;
f3705d53 3740 MemoryRegionSection *section;
84b7b8e7 3741
06ef3525 3742 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3743
cc5bea60
BS
3744 if (!(memory_region_is_ram(section->mr) ||
3745 memory_region_is_romd(section->mr))) {
84b7b8e7 3746 /* I/O case */
cc5bea60 3747 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3748
3749 /* XXX This is broken when device endian != cpu endian.
3750 Fix and add "endian" variable check */
84b7b8e7 3751#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3752 val = io_mem_read(section->mr, addr, 4) << 32;
3753 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3754#else
37ec01d4
AK
3755 val = io_mem_read(section->mr, addr, 4);
3756 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3757#endif
3758 } else {
3759 /* RAM case */
f3705d53 3760 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3761 & TARGET_PAGE_MASK)
cc5bea60 3762 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3763 switch (endian) {
3764 case DEVICE_LITTLE_ENDIAN:
3765 val = ldq_le_p(ptr);
3766 break;
3767 case DEVICE_BIG_ENDIAN:
3768 val = ldq_be_p(ptr);
3769 break;
3770 default:
3771 val = ldq_p(ptr);
3772 break;
3773 }
84b7b8e7
FB
3774 }
3775 return val;
3776}
3777
1e78bcc1
AG
3778uint64_t ldq_phys(target_phys_addr_t addr)
3779{
3780 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3781}
3782
3783uint64_t ldq_le_phys(target_phys_addr_t addr)
3784{
3785 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3786}
3787
3788uint64_t ldq_be_phys(target_phys_addr_t addr)
3789{
3790 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3791}
3792
aab33094 3793/* XXX: optimize */
c227f099 3794uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3795{
3796 uint8_t val;
3797 cpu_physical_memory_read(addr, &val, 1);
3798 return val;
3799}
3800
733f0b02 3801/* warning: addr must be aligned */
1e78bcc1
AG
3802static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3803 enum device_endian endian)
aab33094 3804{
733f0b02
MT
3805 uint8_t *ptr;
3806 uint64_t val;
f3705d53 3807 MemoryRegionSection *section;
733f0b02 3808
06ef3525 3809 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3810
cc5bea60
BS
3811 if (!(memory_region_is_ram(section->mr) ||
3812 memory_region_is_romd(section->mr))) {
733f0b02 3813 /* I/O case */
cc5bea60 3814 addr = memory_region_section_addr(section, addr);
37ec01d4 3815 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3816#if defined(TARGET_WORDS_BIGENDIAN)
3817 if (endian == DEVICE_LITTLE_ENDIAN) {
3818 val = bswap16(val);
3819 }
3820#else
3821 if (endian == DEVICE_BIG_ENDIAN) {
3822 val = bswap16(val);
3823 }
3824#endif
733f0b02
MT
3825 } else {
3826 /* RAM case */
f3705d53 3827 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3828 & TARGET_PAGE_MASK)
cc5bea60 3829 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3830 switch (endian) {
3831 case DEVICE_LITTLE_ENDIAN:
3832 val = lduw_le_p(ptr);
3833 break;
3834 case DEVICE_BIG_ENDIAN:
3835 val = lduw_be_p(ptr);
3836 break;
3837 default:
3838 val = lduw_p(ptr);
3839 break;
3840 }
733f0b02
MT
3841 }
3842 return val;
aab33094
FB
3843}
3844
1e78bcc1
AG
3845uint32_t lduw_phys(target_phys_addr_t addr)
3846{
3847 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3848}
3849
3850uint32_t lduw_le_phys(target_phys_addr_t addr)
3851{
3852 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3853}
3854
3855uint32_t lduw_be_phys(target_phys_addr_t addr)
3856{
3857 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3858}
3859
8df1cd07
FB
3860/* warning: addr must be aligned. The RAM page is not marked as dirty
3861 and the code inside is not invalidated. It is useful if the dirty
3862 bits are used to track modified PTEs */
c227f099 3863void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3864{
8df1cd07 3865 uint8_t *ptr;
f3705d53 3866 MemoryRegionSection *section;
8df1cd07 3867
06ef3525 3868 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3869
f3705d53 3870 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3871 addr = memory_region_section_addr(section, addr);
f3705d53 3872 if (memory_region_is_ram(section->mr)) {
37ec01d4 3873 section = &phys_sections[phys_section_rom];
06ef3525 3874 }
37ec01d4 3875 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3876 } else {
f3705d53 3877 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3878 & TARGET_PAGE_MASK)
cc5bea60 3879 + memory_region_section_addr(section, addr);
5579c7f3 3880 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3881 stl_p(ptr, val);
74576198
AL
3882
3883 if (unlikely(in_migration)) {
3884 if (!cpu_physical_memory_is_dirty(addr1)) {
3885 /* invalidate code */
3886 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3887 /* set dirty bit */
f7c11b53
YT
3888 cpu_physical_memory_set_dirty_flags(
3889 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3890 }
3891 }
8df1cd07
FB
3892 }
3893}
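/* Example (editor's sketch): the classic caller of the _notdirty
 * store is a softmmu page-table walker setting the accessed bit in a
 * guest PTE. stl_phys() would dirty the page-table page and
 * invalidate any TBs on it at every walk; the _notdirty variant
 * avoids that churn. "set_pte_accessed" and the 0x20 bit (modelled
 * on the i386 PG_ACCESSED bit) are illustrative. */
static void set_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20);
}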
3894
c227f099 3895void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3896{
bc98a7ef 3897 uint8_t *ptr;
f3705d53 3898 MemoryRegionSection *section;
bc98a7ef 3899
06ef3525 3900 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3901
f3705d53 3902 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3903 addr = memory_region_section_addr(section, addr);
f3705d53 3904 if (memory_region_is_ram(section->mr)) {
37ec01d4 3905 section = &phys_sections[phys_section_rom];
06ef3525 3906 }
bc98a7ef 3907#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3908 io_mem_write(section->mr, addr, val >> 32, 4);
3909 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 3910#else
37ec01d4
AK
3911 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3912 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
3913#endif
3914 } else {
f3705d53 3915 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3916 & TARGET_PAGE_MASK)
cc5bea60 3917 + memory_region_section_addr(section, addr));
bc98a7ef
JM
3918 stq_p(ptr, val);
3919 }
3920}
3921
8df1cd07 3922/* warning: addr must be aligned */
1e78bcc1
AG
3923static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3924 enum device_endian endian)
8df1cd07 3925{
8df1cd07 3926 uint8_t *ptr;
f3705d53 3927 MemoryRegionSection *section;
8df1cd07 3928
06ef3525 3929 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3930
f3705d53 3931 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3932 addr = memory_region_section_addr(section, addr);
f3705d53 3933 if (memory_region_is_ram(section->mr)) {
37ec01d4 3934 section = &phys_sections[phys_section_rom];
06ef3525 3935 }
1e78bcc1
AG
3936#if defined(TARGET_WORDS_BIGENDIAN)
3937 if (endian == DEVICE_LITTLE_ENDIAN) {
3938 val = bswap32(val);
3939 }
3940#else
3941 if (endian == DEVICE_BIG_ENDIAN) {
3942 val = bswap32(val);
3943 }
3944#endif
37ec01d4 3945 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
3946 } else {
3947 unsigned long addr1;
f3705d53 3948 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 3949 + memory_region_section_addr(section, addr);
8df1cd07 3950 /* RAM case */
5579c7f3 3951 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3952 switch (endian) {
3953 case DEVICE_LITTLE_ENDIAN:
3954 stl_le_p(ptr, val);
3955 break;
3956 case DEVICE_BIG_ENDIAN:
3957 stl_be_p(ptr, val);
3958 break;
3959 default:
3960 stl_p(ptr, val);
3961 break;
3962 }
3a7d929e
FB
3963 if (!cpu_physical_memory_is_dirty(addr1)) {
3964 /* invalidate code */
3965 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3966 /* set dirty bit */
f7c11b53
YT
3967 cpu_physical_memory_set_dirty_flags(addr1,
3968 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3969 }
8df1cd07
FB
3970 }
3971}
3972
1e78bcc1
AG
3973void stl_phys(target_phys_addr_t addr, uint32_t val)
3974{
3975 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3976}
3977
3978void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3979{
3980 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3981}
3982
3983void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3984{
3985 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3986}
3987
aab33094 3988/* XXX: optimize */
c227f099 3989void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
3990{
3991 uint8_t v = val;
3992 cpu_physical_memory_write(addr, &v, 1);
3993}
3994
733f0b02 3995/* warning: addr must be aligned */
1e78bcc1
AG
3996static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
3997 enum device_endian endian)
aab33094 3998{
733f0b02 3999 uint8_t *ptr;
f3705d53 4000 MemoryRegionSection *section;
733f0b02 4001
06ef3525 4002 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4003
f3705d53 4004 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 4005 addr = memory_region_section_addr(section, addr);
f3705d53 4006 if (memory_region_is_ram(section->mr)) {
37ec01d4 4007 section = &phys_sections[phys_section_rom];
06ef3525 4008 }
1e78bcc1
AG
4009#if defined(TARGET_WORDS_BIGENDIAN)
4010 if (endian == DEVICE_LITTLE_ENDIAN) {
4011 val = bswap16(val);
4012 }
4013#else
4014 if (endian == DEVICE_BIG_ENDIAN) {
4015 val = bswap16(val);
4016 }
4017#endif
37ec01d4 4018 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4019 } else {
4020 unsigned long addr1;
f3705d53 4021 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4022 + memory_region_section_addr(section, addr);
733f0b02
MT
4023 /* RAM case */
4024 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4025 switch (endian) {
4026 case DEVICE_LITTLE_ENDIAN:
4027 stw_le_p(ptr, val);
4028 break;
4029 case DEVICE_BIG_ENDIAN:
4030 stw_be_p(ptr, val);
4031 break;
4032 default:
4033 stw_p(ptr, val);
4034 break;
4035 }
733f0b02
MT
4036 if (!cpu_physical_memory_is_dirty(addr1)) {
4037 /* invalidate code */
4038 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4039 /* set dirty bit */
4040 cpu_physical_memory_set_dirty_flags(addr1,
4041 (0xff & ~CODE_DIRTY_FLAG));
4042 }
4043 }
aab33094
FB
4044}
4045
1e78bcc1
AG
4046void stw_phys(target_phys_addr_t addr, uint32_t val)
4047{
4048 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4049}
4050
4051void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4052{
4053 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4054}
4055
4056void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4057{
4058 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4059}
4060
aab33094 4061/* XXX: optimize */
c227f099 4062void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4063{
4064 val = tswap64(val);
71d2b725 4065 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4066}
4067
1e78bcc1
AG
4068void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4069{
4070 val = cpu_to_le64(val);
4071 cpu_physical_memory_write(addr, &val, 8);
4072}
4073
4074void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4075{
4076 val = cpu_to_be64(val);
4077 cpu_physical_memory_write(addr, &val, 8);
4078}
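/* Example (editor's sketch): a device completing a request by writing
 * a 64-bit little-endian status word back into guest memory. The
 * store runs through the dirty-tracking/TB-invalidation path of
 * cpu_physical_memory_write(), keeping self-modifying guests
 * coherent. "mydev_complete" and "status_paddr" are hypothetical. */
static void mydev_complete(target_phys_addr_t status_paddr, uint64_t st)
{
    stq_le_phys(status_paddr, st);
}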
4079
5e2972fd 4080/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4081int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4082 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4083{
4084 int l;
c227f099 4085 target_phys_addr_t phys_addr;
9b3c35e0 4086 target_ulong page;
13eb76e0
FB
4087
4088 while (len > 0) {
4089 page = addr & TARGET_PAGE_MASK;
4090 phys_addr = cpu_get_phys_page_debug(env, page);
4091 /* if no physical page mapped, return an error */
4092 if (phys_addr == -1)
4093 return -1;
4094 l = (page + TARGET_PAGE_SIZE) - addr;
4095 if (l > len)
4096 l = len;
5e2972fd 4097 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4098 if (is_write)
4099 cpu_physical_memory_write_rom(phys_addr, buf, l);
4100 else
5e2972fd 4101 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4102 len -= l;
4103 buf += l;
4104 addr += l;
4105 }
4106 return 0;
4107}
a68fe89c 4108#endif
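/* Example (editor's sketch): cpu_memory_rw_debug() is the primitive
 * behind gdbstub memory access; in the system-emulation variant above
 * it walks the guest page table via cpu_get_phys_page_debug() instead
 * of the TLB. A minimal peek helper, with hypothetical names: */
static int debug_peek32(CPUArchState *env, target_ulong vaddr,
                        uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0);
}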
13eb76e0 4109
2e70f6ef
PB
4110/* in deterministic execution mode, instructions doing device I/Os
4111 must be at the end of the TB */
20503968 4112void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4113{
4114 TranslationBlock *tb;
4115 uint32_t n, cflags;
4116 target_ulong pc, cs_base;
4117 uint64_t flags;
4118
20503968 4119 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4120 if (!tb) {
4121 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4122 (void *)retaddr);
2e70f6ef
PB
4123 }
4124 n = env->icount_decr.u16.low + tb->icount;
20503968 4125 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4126 /* Calculate how many instructions had been executed before the fault
bf20dc07 4127 occurred. */
2e70f6ef
PB
4128 n = n - env->icount_decr.u16.low;
4129 /* Generate a new TB ending on the I/O insn. */
4130 n++;
4131 /* On MIPS and SH, delay slot instructions can only be restarted if
4132 they were already the first instruction in the TB. If this is not
bf20dc07 4133 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4134 branch. */
4135#if defined(TARGET_MIPS)
4136 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4137 env->active_tc.PC -= 4;
4138 env->icount_decr.u16.low++;
4139 env->hflags &= ~MIPS_HFLAG_BMASK;
4140 }
4141#elif defined(TARGET_SH4)
4142 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4143 && n > 1) {
4144 env->pc -= 2;
4145 env->icount_decr.u16.low++;
4146 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4147 }
4148#endif
4149 /* This should never happen. */
4150 if (n > CF_COUNT_MASK)
4151 cpu_abort(env, "TB too big during recompile");
4152
4153 cflags = n | CF_LAST_IO;
4154 pc = tb->pc;
4155 cs_base = tb->cs_base;
4156 flags = tb->flags;
4157 tb_phys_invalidate(tb, -1);
4158 /* FIXME: In theory this could raise an exception. In practice
4159 we have already translated the block once so it's probably ok. */
4160 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4161 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4162 the first in the TB) then we end up generating a whole new TB and
4163 repeating the fault, which is horribly inefficient.
4164 Better would be to execute just this insn uncached, or generate a
4165 second new TB. */
4166 cpu_resume_from_signal(env, NULL);
4167}
4168
b3755a91
PB
4169#if !defined(CONFIG_USER_ONLY)
4170
055403b2 4171void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4172{
4173 int i, target_code_size, max_target_code_size;
4174 int direct_jmp_count, direct_jmp2_count, cross_page;
4175 TranslationBlock *tb;
3b46e624 4176
e3db7226
FB
4177 target_code_size = 0;
4178 max_target_code_size = 0;
4179 cross_page = 0;
4180 direct_jmp_count = 0;
4181 direct_jmp2_count = 0;
4182 for(i = 0; i < nb_tbs; i++) {
4183 tb = &tbs[i];
4184 target_code_size += tb->size;
4185 if (tb->size > max_target_code_size)
4186 max_target_code_size = tb->size;
4187 if (tb->page_addr[1] != -1)
4188 cross_page++;
4189 if (tb->tb_next_offset[0] != 0xffff) {
4190 direct_jmp_count++;
4191 if (tb->tb_next_offset[1] != 0xffff) {
4192 direct_jmp2_count++;
4193 }
4194 }
4195 }
4196 /* XXX: avoid using doubles? */
57fec1fe 4197 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4198 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4199 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4200 cpu_fprintf(f, "TB count %d/%d\n",
4201 nb_tbs, code_gen_max_blocks);
5fafdf24 4202 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4203 nb_tbs ? target_code_size / nb_tbs : 0,
4204 max_target_code_size);
055403b2 4205 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4206 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4207 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4208 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4209 cross_page,
e3db7226
FB
4210 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4211 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4212 direct_jmp_count,
e3db7226
FB
4213 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4214 direct_jmp2_count,
4215 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4216 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4217 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4218 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4219 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4220 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4221}
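/* Example (editor's sketch): this routine produces the statistics
 * shown by the monitor's "info jit" command; a direct stdio call for
 * illustration (the monitor passes its own fprintf-like pair): */
static void show_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}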
4222
82afa586
BH
4223/*
4224 * A helper function for the _utterly broken_ virtio device model to find out if
4225 * it's running on a big endian machine. Don't do this at home kids!
4226 */
4227bool virtio_is_big_endian(void);
4228bool virtio_is_big_endian(void)
4229{
4230#if defined(TARGET_WORDS_BIGENDIAN)
4231 return true;
4232#else
4233 return false;
4234#endif
4235}
4236
61382a50 4237#endif
76f35538
WC
4238
4239#ifndef CONFIG_USER_ONLY
4240bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4241{
4242 MemoryRegionSection *section;
4243
4244 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4245
4246 return !(memory_region_is_ram(section->mr) ||
4247 memory_region_is_romd(section->mr));
4248}
4249#endif
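/* Example (editor's sketch): a caller can use this predicate to tell
 * MMIO from RAM before treating a guest physical address as plain
 * memory. "guest_addr_is_ram" is a hypothetical wrapper: */
static bool guest_addr_is_ram(target_phys_addr_t paddr)
{
    return !cpu_physical_memory_is_io(paddr);
}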