/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
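
/* PageDesc records are created lazily by page_find_alloc(); first_tb is a
   tagged-pointer list (see the note before tb_remove() further down) of
   every TB overlapping this page, and code_bitmap is only built once the
   page has taken SMC_BITMAP_USE_THRESHOLD write accesses. */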

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
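
/* Worked example: with L1_MAP_ADDR_SPACE_BITS = 32 and TARGET_PAGE_BITS = 12
   there are 20 page-index bits to map; 20 % 10 gives V_L1_BITS_REM = 0, which
   is < 4, so V_L1_BITS = 10 and the map is a 1024-entry l1_map followed by a
   single 1024-entry level of PageDesc arrays. */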

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
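
/* Packing each entry into 16 bits keeps an interior node at 2KB (L2_SIZE
   entries), at the cost of capping the map at 32767 nodes and 32767
   sections, since the 15-bit value 0x7fff is reserved as PHYS_MAP_NODE_NIL. */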

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
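
/* Each loop iteration above consumes L2_BITS of the page index, the top
   V_L1_BITS being handled by the statically sized l1_map.  The mmap-based
   ALLOC matters in user mode because this walk can run from the SIGSEGV
   handler, where re-entering a possibly locked allocator would deadlock. */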

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
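
/* The "(*index & (step - 1)) == 0 && *nb >= step" test above means a run of
   pages that is aligned to, and at least as large as, an interior node's span
   becomes a single leaf at that level rather than L2_SIZE child entries: with
   4KB pages, a suitably aligned 4MB region needs one level-1 leaf instead of
   1024 level-0 ones. */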

MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}
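
/* Note that phys_page_find() never returns NULL: a miss at any level falls
   back to the phys_section_unassigned entry, so callers may dereference the
   result unconditionally. */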

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
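
/* code_gen_buffer_max_size leaves TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of
   headroom, so tb_alloc() can test the fill level *before* translation and
   still be sure that one worst-case TB will fit behind code_gen_ptr. */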

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
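
/* CPUs are only ever appended to the singly linked first_cpu list, so
   cpu_index is simply the creation order; qemu_get_cpu() above relies on
   those indexes staying dense and stable. */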

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
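
/* Several lists below store a TranslationBlock pointer with extra state
   packed into its two low bits (TBs are sufficiently aligned for this):
   in first_tb/page_next[] the tag says which of the TB's (up to two) pages
   the link belongs to, while in jmp_first/jmp_next[] a tag of 2 marks the
   head of the circular list of TBs that jump into a given TB. */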

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
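
/* This is the hot path for writes into pages that contain translated code:
   once a page's code_bitmap exists, a small aligned write that misses every
   translated byte range costs only the bitmap probe above instead of a walk
   over the page's whole TB list. */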

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
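
/* The binary search is valid because TBs are allocated sequentially from
   tbs[] and tc_ptr only grows until the next full flush, so the array is
   implicitly sorted by tc_ptr. */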

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
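
/* Watchpoints store ~(len - 1) rather than the length itself, presumably so
   that hit detection on the memory access path reduces to a mask-and-compare
   against the access address; this is also why only power-of-2, naturally
   aligned lengths are accepted above. */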

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
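
/* Unlinking, rather than patching out, the current TB is enough to stop a
   running CPU: once the direct jumps out of it are reset, execution falls
   back into the main loop, which then notices the pending interrupt or exit
   request. */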

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
1725
9349b4f9 1726void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1727{
1728 env->interrupt_request &= ~mask;
1729}
1730
9349b4f9 1731void cpu_exit(CPUArchState *env)
3098dba0
AJ
1732{
1733 env->exit_request = 1;
1734 cpu_unlink_tb(env);
1735}
1736
9349b4f9 1737void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1738{
1739 va_list ap;
493ae1f0 1740 va_list ap2;
7501267e
FB
1741
1742 va_start(ap, fmt);
493ae1f0 1743 va_copy(ap2, ap);
7501267e
FB
1744 fprintf(stderr, "qemu: fatal: ");
1745 vfprintf(stderr, fmt, ap);
1746 fprintf(stderr, "\n");
1747#ifdef TARGET_I386
7fe48483
FB
1748 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1749#else
1750 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1751#endif
93fcfe39
AL
1752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt, ap2);
1755 qemu_log("\n");
f9373291 1756#ifdef TARGET_I386
93fcfe39 1757 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1758#else
93fcfe39 1759 log_cpu_state(env, 0);
f9373291 1760#endif
31b1a7b4 1761 qemu_log_flush();
93fcfe39 1762 qemu_log_close();
924edcae 1763 }
493ae1f0 1764 va_end(ap2);
f9373291 1765 va_end(ap);
fd052bf6
RV
1766#if defined(CONFIG_USER_ONLY)
1767 {
1768 struct sigaction act;
1769 sigfillset(&act.sa_mask);
1770 act.sa_handler = SIG_DFL;
1771 sigaction(SIGABRT, &act, NULL);
1772 }
1773#endif
7501267e
FB
1774 abort();
1775}
1776
9349b4f9 1777CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1778{
9349b4f9
AF
1779 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1780 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1781 int cpu_index = new_env->cpu_index;
5a38f081
AL
1782#if defined(TARGET_HAS_ICE)
1783 CPUBreakpoint *bp;
1784 CPUWatchpoint *wp;
1785#endif
1786
9349b4f9 1787 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1788
1789 /* Preserve chaining and index. */
c5be9f08
TS
1790 new_env->next_cpu = next_cpu;
1791 new_env->cpu_index = cpu_index;
5a38f081
AL
1792
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1796 QTAILQ_INIT(&env->breakpoints);
1797 QTAILQ_INIT(&env->watchpoints);
5a38f081 1798#if defined(TARGET_HAS_ICE)
72cf2d4f 1799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1800 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1801 }
72cf2d4f 1802 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1803 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1804 wp->flags, NULL);
1805 }
1806#endif
1807
c5be9f08
TS
1808 return new_env;
1809}
1810
0124311e 1811#if !defined(CONFIG_USER_ONLY)
0cac1b66 1812void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1813{
1814 unsigned int i;
1815
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1821
1822 i = tb_jmp_cache_hash_page(addr);
1823 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1825}
1826
d24981d3
JQ
1827static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1828 uintptr_t length)
1829{
1830 uintptr_t start1;
1831
1832 /* we modify the TLB cache so that the dirty bit will be set again
1833 when accessing the range */
1834 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1835 /* Check that we don't span multiple blocks - this breaks the
1836 address comparisons below. */
1837 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1838 != (end - 1) - start) {
1839 abort();
1840 }
1841 cpu_tlb_reset_dirty_all(start1, length);
1842
1843}
1844
5579c7f3 1845/* Note: start and end must be within the same ram block. */
c227f099 1846void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1847 int dirty_flags)
1ccde1cb 1848{
d24981d3 1849 uintptr_t length;
1ccde1cb
FB
1850
1851 start &= TARGET_PAGE_MASK;
1852 end = TARGET_PAGE_ALIGN(end);
1853
1854 length = end - start;
1855 if (length == 0)
1856 return;
f7c11b53 1857 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1858
d24981d3
JQ
1859 if (tcg_enabled()) {
1860 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 1861 }
1ccde1cb
FB
1862}
1863
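/* Illustrative sketch (not part of exec.c): the classic consumer of
 * this dirty log is a display device that redraws only touched pages.
 * cpu_physical_memory_get_dirty_flags() and
 * cpu_physical_memory_reset_dirty() are the calls used above;
 * VGA_DIRTY_FLAG and the vram_base/vram_size names are assumptions
 * made for this sketch. */
static void example_update_display(ram_addr_t vram_base, ram_addr_t vram_size)
{
    ram_addr_t addr;

    for (addr = vram_base; addr < vram_base + vram_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty_flags(addr) & VGA_DIRTY_FLAG) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    /* start and end must lie within one RAM block, per the note above */
    cpu_physical_memory_reset_dirty(vram_base, vram_base + vram_size,
                                    VGA_DIRTY_FLAG);
}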
74576198
AL
1864int cpu_physical_memory_set_dirty_tracking(int enable)
1865{
f6f3fbca 1866 int ret = 0;
74576198 1867 in_migration = enable;
f6f3fbca 1868 return ret;
74576198
AL
1869}
1870
e5548617
BS
1871target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1872 MemoryRegionSection *section,
1873 target_ulong vaddr,
1874 target_phys_addr_t paddr,
1875 int prot,
1876 target_ulong *address)
1877{
1878 target_phys_addr_t iotlb;
1879 CPUWatchpoint *wp;
1880
cc5bea60 1881 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1882 /* Normal RAM. */
1883 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1884 + memory_region_section_addr(section, paddr);
e5548617
BS
1885 if (!section->readonly) {
1886 iotlb |= phys_section_notdirty;
1887 } else {
1888 iotlb |= phys_section_rom;
1889 }
1890 } else {
1891 /* IO handlers are currently passed a physical address.
1892 It would be nice to pass an offset from the base address
1893 of that region. This would avoid having to special case RAM,
1894 and avoid full address decoding in every device.
1895 We can't use the high bits of pd for this because
1896 IO_MEM_ROMD uses these as a ram address. */
1897 iotlb = section - phys_sections;
cc5bea60 1898 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
1899 }
1900
1901 /* Make accesses to pages with watchpoints go via the
1902 watchpoint trap routines. */
1903 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1904 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1905 /* Avoid trapping reads of pages with a write breakpoint. */
1906 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1907 iotlb = phys_section_watch + paddr;
1908 *address |= TLB_MMIO;
1909 break;
1910 }
1911 }
1912 }
1913
1914 return iotlb;
1915}
1916
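/* Illustrative sketch (not part of exec.c): consuming an iotlb value
 * produced above. For the I/O case the sub-page bits carry an index
 * into phys_sections, which iotlb_to_region() (defined later in this
 * file) turns back into a MemoryRegion; this wrapper is hypothetical. */
static MemoryRegion *example_iotlb_to_mr(target_phys_addr_t iotlb)
{
    return iotlb_to_region(iotlb);
}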
0124311e 1917#else
edf8e2af
MW
1918/*
1919 * Walks guest process memory "regions" one by one
1920 * and calls callback function 'fn' for each region.
1921 */
5cd2c5b6
RH
1922
1923struct walk_memory_regions_data
1924{
1925 walk_memory_regions_fn fn;
1926 void *priv;
8efe0ca8 1927 uintptr_t start;
5cd2c5b6
RH
1928 int prot;
1929};
1930
1931static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 1932 abi_ulong end, int new_prot)
5cd2c5b6
RH
1933{
1934 if (data->start != -1ul) {
1935 int rc = data->fn(data->priv, data->start, end, data->prot);
1936 if (rc != 0) {
1937 return rc;
1938 }
1939 }
1940
1941 data->start = (new_prot ? end : -1ul);
1942 data->prot = new_prot;
1943
1944 return 0;
1945}
1946
1947static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 1948 abi_ulong base, int level, void **lp)
5cd2c5b6 1949{
b480d9b7 1950 abi_ulong pa;
5cd2c5b6
RH
1951 int i, rc;
1952
1953 if (*lp == NULL) {
1954 return walk_memory_regions_end(data, base, 0);
1955 }
1956
1957 if (level == 0) {
1958 PageDesc *pd = *lp;
7296abac 1959 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1960 int prot = pd[i].flags;
1961
1962 pa = base | (i << TARGET_PAGE_BITS);
1963 if (prot != data->prot) {
1964 rc = walk_memory_regions_end(data, pa, prot);
1965 if (rc != 0) {
1966 return rc;
9fa3e853 1967 }
9fa3e853 1968 }
5cd2c5b6
RH
1969 }
1970 } else {
1971 void **pp = *lp;
7296abac 1972 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
1973 pa = base | ((abi_ulong)i <<
1974 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
1975 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1976 if (rc != 0) {
1977 return rc;
1978 }
1979 }
1980 }
1981
1982 return 0;
1983}
1984
1985int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1986{
1987 struct walk_memory_regions_data data;
8efe0ca8 1988 uintptr_t i;
5cd2c5b6
RH
1989
1990 data.fn = fn;
1991 data.priv = priv;
1992 data.start = -1ul;
1993 data.prot = 0;
1994
1995 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 1996 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
1997 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1998 if (rc != 0) {
1999 return rc;
9fa3e853 2000 }
33417e70 2001 }
5cd2c5b6
RH
2002
2003 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2004}
2005
b480d9b7
PB
2006static int dump_region(void *priv, abi_ulong start,
2007 abi_ulong end, unsigned long prot)
edf8e2af
MW
2008{
2009 FILE *f = (FILE *)priv;
2010
b480d9b7
PB
2011 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2012 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2013 start, end, end - start,
2014 ((prot & PAGE_READ) ? 'r' : '-'),
2015 ((prot & PAGE_WRITE) ? 'w' : '-'),
2016 ((prot & PAGE_EXEC) ? 'x' : '-'));
2017
2018 return (0);
2019}
2020
2021/* dump memory mappings */
2022void page_dump(FILE *f)
2023{
2024 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2025 "start", "end", "size", "prot");
2026 walk_memory_regions(f, dump_region);
33417e70
FB
2027}
2028
53a5960a 2029int page_get_flags(target_ulong address)
33417e70 2030{
9fa3e853
FB
2031 PageDesc *p;
2032
2033 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2034 if (!p)
9fa3e853
FB
2035 return 0;
2036 return p->flags;
2037}
2038
376a7909
RH
2039/* Modify the flags of a page and invalidate the code if necessary.
2040 The flag PAGE_WRITE_ORG is positioned automatically depending
2041 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2042void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2043{
376a7909
RH
2044 target_ulong addr, len;
2045
2046 /* This function should never be called with addresses outside the
2047 guest address space. If this assert fires, it probably indicates
2048 a missing call to h2g_valid. */
b480d9b7
PB
2049#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2050 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2051#endif
2052 assert(start < end);
9fa3e853
FB
2053
2054 start = start & TARGET_PAGE_MASK;
2055 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2056
2057 if (flags & PAGE_WRITE) {
9fa3e853 2058 flags |= PAGE_WRITE_ORG;
376a7909
RH
2059 }
2060
2061 for (addr = start, len = end - start;
2062 len != 0;
2063 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2064 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2065
2066 /* If the write protection bit is set, then we invalidate
2067 the code inside. */
5fafdf24 2068 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2069 (flags & PAGE_WRITE) &&
2070 p->first_tb) {
d720b93d 2071 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2072 }
2073 p->flags = flags;
2074 }
33417e70
FB
2075}
2076
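/* Illustrative sketch (not part of exec.c): how the user-mode mmap
 * emulation might publish a fresh read-write mapping. PAGE_WRITE_ORG
 * is added internally by page_set_flags(); the function name and its
 * arguments are hypothetical. */
static void example_publish_mapping(target_ulong start, target_ulong len)
{
    mmap_lock();
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    mmap_unlock();
}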
3d97b40b
TS
2077int page_check_range(target_ulong start, target_ulong len, int flags)
2078{
2079 PageDesc *p;
2080 target_ulong end;
2081 target_ulong addr;
2082
376a7909
RH
2083 /* This function should never be called with addresses outside the
2084 guest address space. If this assert fires, it probably indicates
2085 a missing call to h2g_valid. */
338e9e6c
BS
2086#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2087 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2088#endif
2089
3e0650a9
RH
2090 if (len == 0) {
2091 return 0;
2092 }
376a7909
RH
2093 if (start + len - 1 < start) {
2094 /* We've wrapped around. */
55f280c9 2095 return -1;
376a7909 2096 }
55f280c9 2097
3d97b40b
TS
2098 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2099 start = start & TARGET_PAGE_MASK;
2100
376a7909
RH
2101 for (addr = start, len = end - start;
2102 len != 0;
2103 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2104 p = page_find(addr >> TARGET_PAGE_BITS);
2105 if( !p )
2106 return -1;
2107 if( !(p->flags & PAGE_VALID) )
2108 return -1;
2109
dae3270c 2110 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2111 return -1;
dae3270c
FB
2112 if (flags & PAGE_WRITE) {
2113 if (!(p->flags & PAGE_WRITE_ORG))
2114 return -1;
2115 /* unprotect the page if it was put read-only because it
2116 contains translated code */
2117 if (!(p->flags & PAGE_WRITE)) {
2118 if (!page_unprotect(addr, 0, NULL))
2119 return -1;
2120 }
2121 return 0;
2122 }
3d97b40b
TS
2123 }
2124 return 0;
2125}
2126
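/* Illustrative sketch (not part of exec.c): a syscall helper verifying
 * that a guest buffer is writable before copying into it, in the style
 * of the lock_user checks used elsewhere. The function name is
 * hypothetical; page_check_range() returns 0 on success. */
static int example_guest_buffer_writable(target_ulong addr, target_ulong len)
{
    return page_check_range(addr, len, PAGE_WRITE) == 0;
}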
9fa3e853 2127/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2128 page. Return TRUE if the fault was successfully handled. */
6375e09e 2129int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2130{
45d679d6
AJ
2131 unsigned int prot;
2132 PageDesc *p;
53a5960a 2133 target_ulong host_start, host_end, addr;
9fa3e853 2134
c8a706fe
PB
2135 /* Technically this isn't safe inside a signal handler. However we
2136 know this only ever happens in a synchronous SEGV handler, so in
2137 practice it seems to be ok. */
2138 mmap_lock();
2139
45d679d6
AJ
2140 p = page_find(address >> TARGET_PAGE_BITS);
2141 if (!p) {
c8a706fe 2142 mmap_unlock();
9fa3e853 2143 return 0;
c8a706fe 2144 }
45d679d6 2145
9fa3e853
FB
2146 /* if the page was really writable, then we change its
2147 protection back to writable */
45d679d6
AJ
2148 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2149 host_start = address & qemu_host_page_mask;
2150 host_end = host_start + qemu_host_page_size;
2151
2152 prot = 0;
2153 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2154 p = page_find(addr >> TARGET_PAGE_BITS);
2155 p->flags |= PAGE_WRITE;
2156 prot |= p->flags;
2157
9fa3e853
FB
2158 /* and since the content will be modified, we must invalidate
2159 the corresponding translated code. */
45d679d6 2160 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2161#ifdef DEBUG_TB_CHECK
45d679d6 2162 tb_invalidate_check(addr);
9fa3e853 2163#endif
9fa3e853 2164 }
45d679d6
AJ
2165 mprotect((void *)g2h(host_start), qemu_host_page_size,
2166 prot & PAGE_BITS);
2167
2168 mmap_unlock();
2169 return 1;
9fa3e853 2170 }
c8a706fe 2171 mmap_unlock();
9fa3e853
FB
2172 return 0;
2173}
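/* Illustrative sketch (not part of exec.c): the shape of the user-mode
 * SEGV handler that sits on top of page_unprotect(). The real handler
 * lives elsewhere and also rewinds guest state; the names here are
 * hypothetical. */
static int example_handle_write_fault(target_ulong guest_addr, uintptr_t pc)
{
    if (page_unprotect(guest_addr, pc, NULL)) {
        return 1;   /* page held translated code; retry the faulting write */
    }
    return 0;       /* a genuine guest protection fault */
}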
9fa3e853
FB
2174#endif /* defined(CONFIG_USER_ONLY) */
2175
e2eef170 2176#if !defined(CONFIG_USER_ONLY)
8da3ff18 2177
c04b2b78
PB
2178#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2179typedef struct subpage_t {
70c68e44 2180 MemoryRegion iomem;
c04b2b78 2181 target_phys_addr_t base;
5312bd8b 2182 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2183} subpage_t;
2184
c227f099 2185static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2186 uint16_t section);
0f0cb164 2187static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2188static void destroy_page_desc(uint16_t section_index)
54688b1e 2189{
5312bd8b
AK
2190 MemoryRegionSection *section = &phys_sections[section_index];
2191 MemoryRegion *mr = section->mr;
54688b1e
AK
2192
2193 if (mr->subpage) {
2194 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2195 memory_region_destroy(&subpage->iomem);
2196 g_free(subpage);
2197 }
2198}
2199
4346ae3e 2200static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2201{
2202 unsigned i;
d6f2ea22 2203 PhysPageEntry *p;
54688b1e 2204
c19e8800 2205 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2206 return;
2207 }
2208
c19e8800 2209 p = phys_map_nodes[lp->ptr];
4346ae3e 2210 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2211 if (!p[i].is_leaf) {
54688b1e 2212 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2213 } else {
c19e8800 2214 destroy_page_desc(p[i].ptr);
54688b1e 2215 }
54688b1e 2216 }
07f07b31 2217 lp->is_leaf = 0;
c19e8800 2218 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2219}
2220
2221static void destroy_all_mappings(void)
2222{
3eef53df 2223 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2224 phys_map_nodes_reset();
54688b1e
AK
2225}
2226
5312bd8b
AK
2227static uint16_t phys_section_add(MemoryRegionSection *section)
2228{
2229 if (phys_sections_nb == phys_sections_nb_alloc) {
2230 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2231 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2232 phys_sections_nb_alloc);
2233 }
2234 phys_sections[phys_sections_nb] = *section;
2235 return phys_sections_nb++;
2236}
2237
2238static void phys_sections_clear(void)
2239{
2240 phys_sections_nb = 0;
2241}
2242
0f0cb164
AK
2243static void register_subpage(MemoryRegionSection *section)
2244{
2245 subpage_t *subpage;
2246 target_phys_addr_t base = section->offset_within_address_space
2247 & TARGET_PAGE_MASK;
f3705d53 2248 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2249 MemoryRegionSection subsection = {
2250 .offset_within_address_space = base,
2251 .size = TARGET_PAGE_SIZE,
2252 };
0f0cb164
AK
2253 target_phys_addr_t start, end;
2254
f3705d53 2255 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2256
f3705d53 2257 if (!(existing->mr->subpage)) {
0f0cb164
AK
2258 subpage = subpage_init(base);
2259 subsection.mr = &subpage->iomem;
2999097b
AK
2260 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2261 phys_section_add(&subsection));
0f0cb164 2262 } else {
f3705d53 2263 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2264 }
2265 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 2266 end = start + section->size - 1;
0f0cb164
AK
2267 subpage_register(subpage, start, end, phys_section_add(section));
2268}
2269
2270
2271static void register_multipage(MemoryRegionSection *section)
33417e70 2272{
dd81124b
AK
2273 target_phys_addr_t start_addr = section->offset_within_address_space;
2274 ram_addr_t size = section->size;
2999097b 2275 target_phys_addr_t addr;
5312bd8b 2276 uint16_t section_index = phys_section_add(section);
dd81124b 2277
3b8e6a2d 2278 assert(size);
f6f3fbca 2279
3b8e6a2d 2280 addr = start_addr;
2999097b
AK
2281 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2282 section_index);
33417e70
FB
2283}
2284
0f0cb164
AK
2285void cpu_register_physical_memory_log(MemoryRegionSection *section,
2286 bool readonly)
2287{
2288 MemoryRegionSection now = *section, remain = *section;
2289
2290 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2291 || (now.size < TARGET_PAGE_SIZE)) {
2292 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2293 - now.offset_within_address_space,
2294 now.size);
2295 register_subpage(&now);
2296 remain.size -= now.size;
2297 remain.offset_within_address_space += now.size;
2298 remain.offset_within_region += now.size;
2299 }
69b67646
TH
2300 while (remain.size >= TARGET_PAGE_SIZE) {
2301 now = remain;
2302 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2303 now.size = TARGET_PAGE_SIZE;
2304 register_subpage(&now);
2305 } else {
2306 now.size &= TARGET_PAGE_MASK;
2307 register_multipage(&now);
2308 }
0f0cb164
AK
2309 remain.size -= now.size;
2310 remain.offset_within_address_space += now.size;
2311 remain.offset_within_region += now.size;
2312 }
2313 now = remain;
2314 if (now.size) {
2315 register_subpage(&now);
2316 }
2317}
2318
2319
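/* Illustrative worked example (not part of exec.c), assuming 4 KiB
 * target pages: registering a section at guest-physical 0x1234 with
 * size 0x3000 proceeds as
 *   0x1234..0x1fff  unaligned head  -> register_subpage()
 *   0x2000..0x3fff  whole pages     -> register_multipage()
 *                   (provided offset_within_region is then page-aligned;
 *                   otherwise these pages too go through register_subpage())
 *   0x4000..0x4233  unaligned tail  -> register_subpage()
 */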
c227f099 2320void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2321{
2322 if (kvm_enabled())
2323 kvm_coalesce_mmio_region(addr, size);
2324}
2325
c227f099 2326void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2327{
2328 if (kvm_enabled())
2329 kvm_uncoalesce_mmio_region(addr, size);
2330}
2331
62a2744c
SY
2332void qemu_flush_coalesced_mmio_buffer(void)
2333{
2334 if (kvm_enabled())
2335 kvm_flush_coalesced_mmio_buffer();
2336}
2337
c902760f
MT
2338#if defined(__linux__) && !defined(TARGET_S390X)
2339
2340#include <sys/vfs.h>
2341
2342#define HUGETLBFS_MAGIC 0x958458f6
2343
2344static long gethugepagesize(const char *path)
2345{
2346 struct statfs fs;
2347 int ret;
2348
2349 do {
9742bf26 2350 ret = statfs(path, &fs);
c902760f
MT
2351 } while (ret != 0 && errno == EINTR);
2352
2353 if (ret != 0) {
9742bf26
YT
2354 perror(path);
2355 return 0;
c902760f
MT
2356 }
2357
2358 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2359 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2360
2361 return fs.f_bsize;
2362}
2363
04b16653
AW
2364static void *file_ram_alloc(RAMBlock *block,
2365 ram_addr_t memory,
2366 const char *path)
c902760f
MT
2367{
2368 char *filename;
2369 void *area;
2370 int fd;
2371#ifdef MAP_POPULATE
2372 int flags;
2373#endif
2374 unsigned long hpagesize;
2375
2376 hpagesize = gethugepagesize(path);
2377 if (!hpagesize) {
9742bf26 2378 return NULL;
c902760f
MT
2379 }
2380
2381 if (memory < hpagesize) {
2382 return NULL;
2383 }
2384
2385 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2386 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2387 return NULL;
2388 }
2389
2390 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2391 return NULL;
c902760f
MT
2392 }
2393
2394 fd = mkstemp(filename);
2395 if (fd < 0) {
9742bf26
YT
2396 perror("unable to create backing store for hugepages");
2397 free(filename);
2398 return NULL;
c902760f
MT
2399 }
2400 unlink(filename);
2401 free(filename);
2402
2403 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2404
2405 /*
2406 * ftruncate is not supported by hugetlbfs in older
2407 * hosts, so don't bother bailing out on errors.
2408 * If anything goes wrong with it under other filesystems,
2409 * mmap will fail.
2410 */
2411 if (ftruncate(fd, memory))
9742bf26 2412 perror("ftruncate");
c902760f
MT
2413
2414#ifdef MAP_POPULATE
2415 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2416 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2417 * to sidestep this quirk.
2418 */
2419 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2420 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2421#else
2422 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2423#endif
2424 if (area == MAP_FAILED) {
9742bf26
YT
2425 perror("file_ram_alloc: can't mmap RAM pages");
2426 close(fd);
2427 return (NULL);
c902760f 2428 }
04b16653 2429 block->fd = fd;
c902760f
MT
2430 return area;
2431}
2432#endif
2433
d17b5288 2434static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2435{
2436 RAMBlock *block, *next_block;
3e837b2c 2437 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2438
2439 if (QLIST_EMPTY(&ram_list.blocks))
2440 return 0;
2441
2442 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2443 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2444
2445 end = block->offset + block->length;
2446
2447 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2448 if (next_block->offset >= end) {
2449 next = MIN(next, next_block->offset);
2450 }
2451 }
2452 if (next - end >= size && next - end < mingap) {
3e837b2c 2453 offset = end;
04b16653
AW
2454 mingap = next - end;
2455 }
2456 }
3e837b2c
AW
2457
2458 if (offset == RAM_ADDR_MAX) {
2459 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2460 (uint64_t)size);
2461 abort();
2462 }
2463
04b16653
AW
2464 return offset;
2465}
2466
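/* Illustrative worked example (not part of exec.c): with existing
 * blocks [0x0, 0x100000) and [0x300000, 0x400000), a request for
 * size 0x80000 sees one candidate gap per block end: 0x200000 bytes
 * after the first block, and an effectively unbounded gap after the
 * second (next stays RAM_ADDR_MAX). The smallest gap that fits wins,
 * so the new block is placed at offset 0x100000. */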
2467static ram_addr_t last_ram_offset(void)
d17b5288
AW
2468{
2469 RAMBlock *block;
2470 ram_addr_t last = 0;
2471
2472 QLIST_FOREACH(block, &ram_list.blocks, next)
2473 last = MAX(last, block->offset + block->length);
2474
2475 return last;
2476}
2477
c5705a77 2478void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2479{
2480 RAMBlock *new_block, *block;
2481
c5705a77
AK
2482 new_block = NULL;
2483 QLIST_FOREACH(block, &ram_list.blocks, next) {
2484 if (block->offset == addr) {
2485 new_block = block;
2486 break;
2487 }
2488 }
2489 assert(new_block);
2490 assert(!new_block->idstr[0]);
84b89d78 2491
09e5ab63
AL
2492 if (dev) {
2493 char *id = qdev_get_dev_path(dev);
84b89d78
CM
2494 if (id) {
2495 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2496 g_free(id);
84b89d78
CM
2497 }
2498 }
2499 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2500
2501 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2502 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2503 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2504 new_block->idstr);
2505 abort();
2506 }
2507 }
c5705a77
AK
2508}
2509
2510ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2511 MemoryRegion *mr)
2512{
2513 RAMBlock *new_block;
2514
2515 size = TARGET_PAGE_ALIGN(size);
2516 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2517
7c637366 2518 new_block->mr = mr;
432d268c 2519 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2520 if (host) {
2521 new_block->host = host;
cd19cfa2 2522 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2523 } else {
2524 if (mem_path) {
c902760f 2525#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2526 new_block->host = file_ram_alloc(new_block, size, mem_path);
2527 if (!new_block->host) {
2528 new_block->host = qemu_vmalloc(size);
e78815a5 2529 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2530 }
c902760f 2531#else
6977dfe6
YT
2532 fprintf(stderr, "-mem-path option unsupported\n");
2533 exit(1);
c902760f 2534#endif
6977dfe6 2535 } else {
868bb33f 2536 if (xen_enabled()) {
fce537d4 2537 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
2538 } else if (kvm_enabled()) {
2539 /* some s390/kvm configurations have special constraints */
2540 new_block->host = kvm_vmalloc(size);
432d268c
JN
2541 } else {
2542 new_block->host = qemu_vmalloc(size);
2543 }
e78815a5 2544 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2545 }
c902760f 2546 }
94a6b54f
PB
2547 new_block->length = size;
2548
f471a17e 2549 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2550
7267c094 2551 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2552 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
2553 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2554 0, size >> TARGET_PAGE_BITS);
1720aeee 2555 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 2556
6f0437e8
JK
2557 if (kvm_enabled())
2558 kvm_setup_guest_memory(new_block->host, size);
2559
94a6b54f
PB
2560 return new_block->offset;
2561}
e9a1ab19 2562
c5705a77 2563ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2564{
c5705a77 2565 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2566}
2567
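/* Illustrative sketch (not part of exec.c): allocating guest RAM for a
 * device and touching it from the host side. The MemoryRegion is
 * assumed to be initialized by the caller (normally this happens
 * through the memory API rather than by calling qemu_ram_alloc()
 * directly); the function name is hypothetical. */
static void *example_alloc_vram(MemoryRegion *mr, ram_addr_t size)
{
    ram_addr_t offset = qemu_ram_alloc(size, mr);

    /* valid only for memory the device owns; see qemu_get_ram_ptr() below */
    return qemu_get_ram_ptr(offset);
}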
1f2e98b6
AW
2568void qemu_ram_free_from_ptr(ram_addr_t addr)
2569{
2570 RAMBlock *block;
2571
2572 QLIST_FOREACH(block, &ram_list.blocks, next) {
2573 if (addr == block->offset) {
2574 QLIST_REMOVE(block, next);
7267c094 2575 g_free(block);
1f2e98b6
AW
2576 return;
2577 }
2578 }
2579}
2580
c227f099 2581void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2582{
04b16653
AW
2583 RAMBlock *block;
2584
2585 QLIST_FOREACH(block, &ram_list.blocks, next) {
2586 if (addr == block->offset) {
2587 QLIST_REMOVE(block, next);
cd19cfa2
HY
2588 if (block->flags & RAM_PREALLOC_MASK) {
2589 ;
2590 } else if (mem_path) {
04b16653
AW
2591#if defined (__linux__) && !defined(TARGET_S390X)
2592 if (block->fd) {
2593 munmap(block->host, block->length);
2594 close(block->fd);
2595 } else {
2596 qemu_vfree(block->host);
2597 }
fd28aa13
JK
2598#else
2599 abort();
04b16653
AW
2600#endif
2601 } else {
2602#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2603 munmap(block->host, block->length);
2604#else
868bb33f 2605 if (xen_enabled()) {
e41d7c69 2606 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2607 } else {
2608 qemu_vfree(block->host);
2609 }
04b16653
AW
2610#endif
2611 }
7267c094 2612 g_free(block);
04b16653
AW
2613 return;
2614 }
2615 }
2616
e9a1ab19
FB
2617}
2618
cd19cfa2
HY
2619#ifndef _WIN32
2620void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2621{
2622 RAMBlock *block;
2623 ram_addr_t offset;
2624 int flags;
2625 void *area, *vaddr;
2626
2627 QLIST_FOREACH(block, &ram_list.blocks, next) {
2628 offset = addr - block->offset;
2629 if (offset < block->length) {
2630 vaddr = block->host + offset;
2631 if (block->flags & RAM_PREALLOC_MASK) {
2632 ;
2633 } else {
2634 flags = MAP_FIXED;
2635 munmap(vaddr, length);
2636 if (mem_path) {
2637#if defined(__linux__) && !defined(TARGET_S390X)
2638 if (block->fd) {
2639#ifdef MAP_POPULATE
2640 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2641 MAP_PRIVATE;
2642#else
2643 flags |= MAP_PRIVATE;
2644#endif
2645 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2646 flags, block->fd, offset);
2647 } else {
2648 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2649 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2650 flags, -1, 0);
2651 }
fd28aa13
JK
2652#else
2653 abort();
cd19cfa2
HY
2654#endif
2655 } else {
2656#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2657 flags |= MAP_SHARED | MAP_ANONYMOUS;
2658 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2659 flags, -1, 0);
2660#else
2661 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2662 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2663 flags, -1, 0);
2664#endif
2665 }
2666 if (area != vaddr) {
f15fbc4b
AP
2667 fprintf(stderr, "Could not remap addr: "
2668 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2669 length, addr);
2670 exit(1);
2671 }
2672 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2673 }
2674 return;
2675 }
2676 }
2677}
2678#endif /* !_WIN32 */
2679
dc828ca1 2680/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2681 With the exception of the softmmu code in this file, this should
2682 only be used for local memory (e.g. video ram) that the device owns,
2683 and knows it isn't going to access beyond the end of the block.
2684
2685 It should not be used for general purpose DMA.
2686 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2687 */
c227f099 2688void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2689{
94a6b54f
PB
2690 RAMBlock *block;
2691
f471a17e
AW
2692 QLIST_FOREACH(block, &ram_list.blocks, next) {
2693 if (addr - block->offset < block->length) {
7d82af38
VP
2694 /* Move this entry to to start of the list. */
2695 if (block != QLIST_FIRST(&ram_list.blocks)) {
2696 QLIST_REMOVE(block, next);
2697 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2698 }
868bb33f 2699 if (xen_enabled()) {
432d268c
JN
2700 /* We need to check if the requested address is in the RAM
2701 * because we don't want to map the entire memory in QEMU.
712c2b41 2702 * In that case just map until the end of the page.
432d268c
JN
2703 */
2704 if (block->offset == 0) {
e41d7c69 2705 return xen_map_cache(addr, 0, 0);
432d268c 2706 } else if (block->host == NULL) {
e41d7c69
JK
2707 block->host =
2708 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2709 }
2710 }
f471a17e
AW
2711 return block->host + (addr - block->offset);
2712 }
94a6b54f 2713 }
f471a17e
AW
2714
2715 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2716 abort();
2717
2718 return NULL;
dc828ca1
PB
2719}
2720
b2e0a138
MT
2721/* Return a host pointer to ram allocated with qemu_ram_alloc.
2722 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2723 */
2724void *qemu_safe_ram_ptr(ram_addr_t addr)
2725{
2726 RAMBlock *block;
2727
2728 QLIST_FOREACH(block, &ram_list.blocks, next) {
2729 if (addr - block->offset < block->length) {
868bb33f 2730 if (xen_enabled()) {
432d268c
JN
2731 /* We need to check if the requested address is in the RAM
2732 * because we don't want to map the entire memory in QEMU.
712c2b41 2733 * In that case just map until the end of the page.
432d268c
JN
2734 */
2735 if (block->offset == 0) {
e41d7c69 2736 return xen_map_cache(addr, 0, 0);
432d268c 2737 } else if (block->host == NULL) {
e41d7c69
JK
2738 block->host =
2739 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2740 }
2741 }
b2e0a138
MT
2742 return block->host + (addr - block->offset);
2743 }
2744 }
2745
2746 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2747 abort();
2748
2749 return NULL;
2750}
2751
38bee5dc
SS
2752/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2753 * but takes a size argument */
8ab934f9 2754void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2755{
8ab934f9
SS
2756 if (*size == 0) {
2757 return NULL;
2758 }
868bb33f 2759 if (xen_enabled()) {
e41d7c69 2760 return xen_map_cache(addr, *size, 1);
868bb33f 2761 } else {
38bee5dc
SS
2762 RAMBlock *block;
2763
2764 QLIST_FOREACH(block, &ram_list.blocks, next) {
2765 if (addr - block->offset < block->length) {
2766 if (addr - block->offset + *size > block->length)
2767 *size = block->length - addr + block->offset;
2768 return block->host + (addr - block->offset);
2769 }
2770 }
2771
2772 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2773 abort();
38bee5dc
SS
2774 }
2775}
2776
050a0ddf
AP
2777void qemu_put_ram_ptr(void *addr)
2778{
2779 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2780}
2781
e890261f 2782int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2783{
94a6b54f
PB
2784 RAMBlock *block;
2785 uint8_t *host = ptr;
2786
868bb33f 2787 if (xen_enabled()) {
e41d7c69 2788 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2789 return 0;
2790 }
2791
f471a17e 2792 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
2793 /* This case appears when the block is not mapped. */
2794 if (block->host == NULL) {
2795 continue;
2796 }
f471a17e 2797 if (host - block->host < block->length) {
e890261f
MT
2798 *ram_addr = block->offset + (host - block->host);
2799 return 0;
f471a17e 2800 }
94a6b54f 2801 }
432d268c 2802
e890261f
MT
2803 return -1;
2804}
f471a17e 2805
e890261f
MT
2806/* Some of the softmmu routines need to translate from a host pointer
2807 (typically a TLB entry) back to a ram offset. */
2808ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2809{
2810 ram_addr_t ram_addr;
f471a17e 2811
e890261f
MT
2812 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2813 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2814 abort();
2815 }
2816 return ram_addr;
5579c7f3
PB
2817}
2818
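/* Illustrative sketch (not part of exec.c): the host-pointer to
 * ram-offset round trip. Useful for asserting that a pointer handed to
 * a device really came out of guest RAM; the function name is
 * hypothetical, and qemu_ram_addr_from_host() returns 0 on success. */
static bool example_is_guest_ram(void *host_ptr)
{
    ram_addr_t ram_addr;

    return qemu_ram_addr_from_host(host_ptr, &ram_addr) == 0;
}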
0e0df1e2
AK
2819static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2820 unsigned size)
e18231a3
BS
2821{
2822#ifdef DEBUG_UNASSIGNED
2823 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2824#endif
5b450407 2825#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2826 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2827#endif
2828 return 0;
2829}
2830
0e0df1e2
AK
2831static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2832 uint64_t val, unsigned size)
e18231a3
BS
2833{
2834#ifdef DEBUG_UNASSIGNED
0e0df1e2 2835 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2836#endif
5b450407 2837#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2838 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2839#endif
33417e70
FB
2840}
2841
0e0df1e2
AK
2842static const MemoryRegionOps unassigned_mem_ops = {
2843 .read = unassigned_mem_read,
2844 .write = unassigned_mem_write,
2845 .endianness = DEVICE_NATIVE_ENDIAN,
2846};
e18231a3 2847
0e0df1e2
AK
2848static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2849 unsigned size)
e18231a3 2850{
0e0df1e2 2851 abort();
e18231a3
BS
2852}
2853
0e0df1e2
AK
2854static void error_mem_write(void *opaque, target_phys_addr_t addr,
2855 uint64_t value, unsigned size)
e18231a3 2856{
0e0df1e2 2857 abort();
33417e70
FB
2858}
2859
0e0df1e2
AK
2860static const MemoryRegionOps error_mem_ops = {
2861 .read = error_mem_read,
2862 .write = error_mem_write,
2863 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2864};
2865
0e0df1e2
AK
2866static const MemoryRegionOps rom_mem_ops = {
2867 .read = error_mem_read,
2868 .write = unassigned_mem_write,
2869 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2870};
2871
0e0df1e2
AK
2872static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2873 uint64_t val, unsigned size)
9fa3e853 2874{
3a7d929e 2875 int dirty_flags;
f7c11b53 2876 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2877 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2878#if !defined(CONFIG_USER_ONLY)
0e0df1e2 2879 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 2880 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2881#endif
3a7d929e 2882 }
0e0df1e2
AK
2883 switch (size) {
2884 case 1:
2885 stb_p(qemu_get_ram_ptr(ram_addr), val);
2886 break;
2887 case 2:
2888 stw_p(qemu_get_ram_ptr(ram_addr), val);
2889 break;
2890 case 4:
2891 stl_p(qemu_get_ram_ptr(ram_addr), val);
2892 break;
2893 default:
2894 abort();
3a7d929e 2895 }
f23db169 2896 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2897 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2898 /* we remove the notdirty callback only if the code has been
2899 flushed */
2900 if (dirty_flags == 0xff)
2e70f6ef 2901 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2902}
2903
0e0df1e2
AK
2904static const MemoryRegionOps notdirty_mem_ops = {
2905 .read = error_mem_read,
2906 .write = notdirty_mem_write,
2907 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
2908};
2909
0f459d16 2910/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2911static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 2912{
9349b4f9 2913 CPUArchState *env = cpu_single_env;
06d55cc1
AL
2914 target_ulong pc, cs_base;
2915 TranslationBlock *tb;
0f459d16 2916 target_ulong vaddr;
a1d1bb31 2917 CPUWatchpoint *wp;
06d55cc1 2918 int cpu_flags;
0f459d16 2919
06d55cc1
AL
2920 if (env->watchpoint_hit) {
2921 /* We re-entered the check after replacing the TB. Now raise
2922 * the debug interrupt so that it will trigger after the
2923 * current instruction. */
2924 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2925 return;
2926 }
2e70f6ef 2927 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2928 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2929 if ((vaddr == (wp->vaddr & len_mask) ||
2930 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2931 wp->flags |= BP_WATCHPOINT_HIT;
2932 if (!env->watchpoint_hit) {
2933 env->watchpoint_hit = wp;
2934 tb = tb_find_pc(env->mem_io_pc);
2935 if (!tb) {
2936 cpu_abort(env, "check_watchpoint: could not find TB for "
2937 "pc=%p", (void *)env->mem_io_pc);
2938 }
618ba8e6 2939 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
2940 tb_phys_invalidate(tb, -1);
2941 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2942 env->exception_index = EXCP_DEBUG;
488d6577 2943 cpu_loop_exit(env);
6e140f28
AL
2944 } else {
2945 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2946 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 2947 cpu_resume_from_signal(env, NULL);
6e140f28 2948 }
06d55cc1 2949 }
6e140f28
AL
2950 } else {
2951 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2952 }
2953 }
2954}
2955
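/* Illustrative sketch (not part of exec.c): arming a write watchpoint
 * that check_watchpoint() above will report. The length must be a
 * power of two so it can be expressed through len_mask; the wrapper is
 * hypothetical, while cpu_watchpoint_insert() is the call already used
 * by cpu_copy() earlier in this file. */
static int example_watch_writes(CPUArchState *env, target_ulong vaddr)
{
    return cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}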
6658ffb8
PB
2956/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2957 so these check for a hit then pass through to the normal out-of-line
2958 phys routines. */
1ec9b909
AK
2959static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2960 unsigned size)
6658ffb8 2961{
1ec9b909
AK
2962 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2963 switch (size) {
2964 case 1: return ldub_phys(addr);
2965 case 2: return lduw_phys(addr);
2966 case 4: return ldl_phys(addr);
2967 default: abort();
2968 }
6658ffb8
PB
2969}
2970
1ec9b909
AK
2971static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2972 uint64_t val, unsigned size)
6658ffb8 2973{
1ec9b909
AK
2974 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2975 switch (size) {
67364150
MF
2976 case 1:
2977 stb_phys(addr, val);
2978 break;
2979 case 2:
2980 stw_phys(addr, val);
2981 break;
2982 case 4:
2983 stl_phys(addr, val);
2984 break;
1ec9b909
AK
2985 default: abort();
2986 }
6658ffb8
PB
2987}
2988
1ec9b909
AK
2989static const MemoryRegionOps watch_mem_ops = {
2990 .read = watch_mem_read,
2991 .write = watch_mem_write,
2992 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 2993};
6658ffb8 2994
70c68e44
AK
2995static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
2996 unsigned len)
db7b5426 2997{
70c68e44 2998 subpage_t *mmio = opaque;
f6405247 2999 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3000 MemoryRegionSection *section;
db7b5426
BS
3001#if defined(DEBUG_SUBPAGE)
3002 printf("%s: subpage %p len %u addr " TARGET_FMT_plx " idx %u\n", __func__,
3003 mmio, len, addr, idx);
3004#endif
db7b5426 3005
5312bd8b
AK
3006 section = &phys_sections[mmio->sub_section[idx]];
3007 addr += mmio->base;
3008 addr -= section->offset_within_address_space;
3009 addr += section->offset_within_region;
37ec01d4 3010 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3011}
3012
70c68e44
AK
3013static void subpage_write(void *opaque, target_phys_addr_t addr,
3014 uint64_t value, unsigned len)
db7b5426 3015{
70c68e44 3016 subpage_t *mmio = opaque;
f6405247 3017 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3018 MemoryRegionSection *section;
db7b5426 3019#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3020 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
3021 " idx %u value %"PRIx64"\n",
f6405247 3022 __func__, mmio, len, addr, idx, value);
db7b5426 3023#endif
f6405247 3024
5312bd8b
AK
3025 section = &phys_sections[mmio->sub_section[idx]];
3026 addr += mmio->base;
3027 addr -= section->offset_within_address_space;
3028 addr += section->offset_within_region;
37ec01d4 3029 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3030}
3031
70c68e44
AK
3032static const MemoryRegionOps subpage_ops = {
3033 .read = subpage_read,
3034 .write = subpage_write,
3035 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3036};
3037
de712f94
AK
3038static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3039 unsigned size)
56384e8b
AF
3040{
3041 ram_addr_t raddr = addr;
3042 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3043 switch (size) {
3044 case 1: return ldub_p(ptr);
3045 case 2: return lduw_p(ptr);
3046 case 4: return ldl_p(ptr);
3047 default: abort();
3048 }
56384e8b
AF
3049}
3050
de712f94
AK
3051static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3052 uint64_t value, unsigned size)
56384e8b
AF
3053{
3054 ram_addr_t raddr = addr;
3055 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3056 switch (size) {
3057 case 1: return stb_p(ptr, value);
3058 case 2: return stw_p(ptr, value);
3059 case 4: return stl_p(ptr, value);
3060 default: abort();
3061 }
56384e8b
AF
3062}
3063
de712f94
AK
3064static const MemoryRegionOps subpage_ram_ops = {
3065 .read = subpage_ram_read,
3066 .write = subpage_ram_write,
3067 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3068};
3069
c227f099 3070static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3071 uint16_t section)
db7b5426
BS
3072{
3073 int idx, eidx;
3074
3075 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3076 return -1;
3077 idx = SUBPAGE_IDX(start);
3078 eidx = SUBPAGE_IDX(end);
3079#if defined(DEBUG_SUBPAGE)
0bf9e31a 3080 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3081 mmio, start, end, idx, eidx, section);
3082#endif
5312bd8b
AK
3083 if (memory_region_is_ram(phys_sections[section].mr)) {
3084 MemoryRegionSection new_section = phys_sections[section];
3085 new_section.mr = &io_mem_subpage_ram;
3086 section = phys_section_add(&new_section);
56384e8b 3087 }
db7b5426 3088 for (; idx <= eidx; idx++) {
5312bd8b 3089 mmio->sub_section[idx] = section;
db7b5426
BS
3090 }
3091
3092 return 0;
3093}
3094
0f0cb164 3095static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3096{
c227f099 3097 subpage_t *mmio;
db7b5426 3098
7267c094 3099 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3100
3101 mmio->base = base;
70c68e44
AK
3102 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3103 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3104 mmio->iomem.subpage = true;
db7b5426 3105#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3106 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3107 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3108#endif
0f0cb164 3109 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3110
3111 return mmio;
3112}
3113
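/* Illustrative worked example (not part of exec.c), assuming 4 KiB
 * target pages: an access at guest-physical 0x12345 lands on the
 * subpage covering 0x12000..0x12fff; SUBPAGE_IDX() extracts 0x345,
 * sub_section[0x345] names the MemoryRegionSection for that byte, and
 * subpage_read()/subpage_write() redispatch to its MemoryRegion. */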
5312bd8b
AK
3114static uint16_t dummy_section(MemoryRegion *mr)
3115{
3116 MemoryRegionSection section = {
3117 .mr = mr,
3118 .offset_within_address_space = 0,
3119 .offset_within_region = 0,
3120 .size = UINT64_MAX,
3121 };
3122
3123 return phys_section_add(&section);
3124}
3125
37ec01d4 3126MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3127{
37ec01d4 3128 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3129}
3130
e9179ce1
AK
3131static void io_mem_init(void)
3132{
0e0df1e2 3133 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3134 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3135 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3136 "unassigned", UINT64_MAX);
3137 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3138 "notdirty", UINT64_MAX);
de712f94
AK
3139 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3140 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3141 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3142 "watch", UINT64_MAX);
e9179ce1
AK
3143}
3144
50c1e149
AK
3145static void core_begin(MemoryListener *listener)
3146{
54688b1e 3147 destroy_all_mappings();
5312bd8b 3148 phys_sections_clear();
c19e8800 3149 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3150 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3151 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3152 phys_section_rom = dummy_section(&io_mem_rom);
3153 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3154}
3155
3156static void core_commit(MemoryListener *listener)
3157{
9349b4f9 3158 CPUArchState *env;
117712c3
AK
3159
3160 /* since each CPU stores ram addresses in its TLB cache, we must
3161 reset the modified entries */
3162 /* XXX: slow ! */
3163 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3164 tlb_flush(env, 1);
3165 }
50c1e149
AK
3166}
3167
93632747
AK
3168static void core_region_add(MemoryListener *listener,
3169 MemoryRegionSection *section)
3170{
4855d41a 3171 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3172}
3173
3174static void core_region_del(MemoryListener *listener,
3175 MemoryRegionSection *section)
3176{
93632747
AK
3177}
3178
50c1e149
AK
3179static void core_region_nop(MemoryListener *listener,
3180 MemoryRegionSection *section)
3181{
54688b1e 3182 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3183}
3184
93632747
AK
3185static void core_log_start(MemoryListener *listener,
3186 MemoryRegionSection *section)
3187{
3188}
3189
3190static void core_log_stop(MemoryListener *listener,
3191 MemoryRegionSection *section)
3192{
3193}
3194
3195static void core_log_sync(MemoryListener *listener,
3196 MemoryRegionSection *section)
3197{
3198}
3199
3200static void core_log_global_start(MemoryListener *listener)
3201{
3202 cpu_physical_memory_set_dirty_tracking(1);
3203}
3204
3205static void core_log_global_stop(MemoryListener *listener)
3206{
3207 cpu_physical_memory_set_dirty_tracking(0);
3208}
3209
3210static void core_eventfd_add(MemoryListener *listener,
3211 MemoryRegionSection *section,
753d5e14 3212 bool match_data, uint64_t data, EventNotifier *e)
93632747
AK
3213{
3214}
3215
3216static void core_eventfd_del(MemoryListener *listener,
3217 MemoryRegionSection *section,
753d5e14 3218 bool match_data, uint64_t data, EventNotifier *e)
93632747
AK
3219{
3220}
3221
50c1e149
AK
3222static void io_begin(MemoryListener *listener)
3223{
3224}
3225
3226static void io_commit(MemoryListener *listener)
3227{
3228}
3229
4855d41a
AK
3230static void io_region_add(MemoryListener *listener,
3231 MemoryRegionSection *section)
3232{
a2d33521
AK
3233 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3234
3235 mrio->mr = section->mr;
3236 mrio->offset = section->offset_within_region;
3237 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3238 section->offset_within_address_space, section->size);
a2d33521 3239 ioport_register(&mrio->iorange);
4855d41a
AK
3240}
3241
3242static void io_region_del(MemoryListener *listener,
3243 MemoryRegionSection *section)
3244{
3245 isa_unassign_ioport(section->offset_within_address_space, section->size);
3246}
3247
50c1e149
AK
3248static void io_region_nop(MemoryListener *listener,
3249 MemoryRegionSection *section)
3250{
3251}
3252
4855d41a
AK
3253static void io_log_start(MemoryListener *listener,
3254 MemoryRegionSection *section)
3255{
3256}
3257
3258static void io_log_stop(MemoryListener *listener,
3259 MemoryRegionSection *section)
3260{
3261}
3262
3263static void io_log_sync(MemoryListener *listener,
3264 MemoryRegionSection *section)
3265{
3266}
3267
3268static void io_log_global_start(MemoryListener *listener)
3269{
3270}
3271
3272static void io_log_global_stop(MemoryListener *listener)
3273{
3274}
3275
3276static void io_eventfd_add(MemoryListener *listener,
3277 MemoryRegionSection *section,
753d5e14 3278 bool match_data, uint64_t data, EventNotifier *e)
4855d41a
AK
3279{
3280}
3281
3282static void io_eventfd_del(MemoryListener *listener,
3283 MemoryRegionSection *section,
753d5e14 3284 bool match_data, uint64_t data, EventNotifier *e)
4855d41a
AK
3285{
3286}
3287
93632747 3288static MemoryListener core_memory_listener = {
50c1e149
AK
3289 .begin = core_begin,
3290 .commit = core_commit,
93632747
AK
3291 .region_add = core_region_add,
3292 .region_del = core_region_del,
50c1e149 3293 .region_nop = core_region_nop,
93632747
AK
3294 .log_start = core_log_start,
3295 .log_stop = core_log_stop,
3296 .log_sync = core_log_sync,
3297 .log_global_start = core_log_global_start,
3298 .log_global_stop = core_log_global_stop,
3299 .eventfd_add = core_eventfd_add,
3300 .eventfd_del = core_eventfd_del,
3301 .priority = 0,
3302};
3303
4855d41a 3304static MemoryListener io_memory_listener = {
50c1e149
AK
3305 .begin = io_begin,
3306 .commit = io_commit,
4855d41a
AK
3307 .region_add = io_region_add,
3308 .region_del = io_region_del,
50c1e149 3309 .region_nop = io_region_nop,
4855d41a
AK
3310 .log_start = io_log_start,
3311 .log_stop = io_log_stop,
3312 .log_sync = io_log_sync,
3313 .log_global_start = io_log_global_start,
3314 .log_global_stop = io_log_global_stop,
3315 .eventfd_add = io_eventfd_add,
3316 .eventfd_del = io_eventfd_del,
3317 .priority = 0,
3318};
3319
62152b8a
AK
3320static void memory_map_init(void)
3321{
7267c094 3322 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3323 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3324 set_system_memory_map(system_memory);
309cb471 3325
7267c094 3326 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3327 memory_region_init(system_io, "io", 65536);
3328 set_system_io_map(system_io);
93632747 3329
4855d41a
AK
3330 memory_listener_register(&core_memory_listener, system_memory);
3331 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3332}
3333
3334MemoryRegion *get_system_memory(void)
3335{
3336 return system_memory;
3337}
3338
309cb471
AK
3339MemoryRegion *get_system_io(void)
3340{
3341 return system_io;
3342}
3343
e2eef170
PB
3344#endif /* !defined(CONFIG_USER_ONLY) */
3345
13eb76e0
FB
3346/* physical memory access (slow version, mainly for debug) */
3347#if defined(CONFIG_USER_ONLY)
9349b4f9 3348int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3349 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3350{
3351 int l, flags;
3352 target_ulong page;
53a5960a 3353 void * p;
13eb76e0
FB
3354
3355 while (len > 0) {
3356 page = addr & TARGET_PAGE_MASK;
3357 l = (page + TARGET_PAGE_SIZE) - addr;
3358 if (l > len)
3359 l = len;
3360 flags = page_get_flags(page);
3361 if (!(flags & PAGE_VALID))
a68fe89c 3362 return -1;
13eb76e0
FB
3363 if (is_write) {
3364 if (!(flags & PAGE_WRITE))
a68fe89c 3365 return -1;
579a97f7 3366 /* XXX: this code should not depend on lock_user */
72fb7daa 3367 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3368 return -1;
72fb7daa
AJ
3369 memcpy(p, buf, l);
3370 unlock_user(p, addr, l);
13eb76e0
FB
3371 } else {
3372 if (!(flags & PAGE_READ))
a68fe89c 3373 return -1;
579a97f7 3374 /* XXX: this code should not depend on lock_user */
72fb7daa 3375 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3376 return -1;
72fb7daa 3377 memcpy(buf, p, l);
5b257578 3378 unlock_user(p, addr, 0);
13eb76e0
FB
3379 }
3380 len -= l;
3381 buf += l;
3382 addr += l;
3383 }
a68fe89c 3384 return 0;
13eb76e0 3385}
8df1cd07 3386
13eb76e0 3387#else
c227f099 3388void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3389 int len, int is_write)
3390{
37ec01d4 3391 int l;
13eb76e0
FB
3392 uint8_t *ptr;
3393 uint32_t val;
c227f099 3394 target_phys_addr_t page;
f3705d53 3395 MemoryRegionSection *section;
3b46e624 3396
13eb76e0
FB
3397 while (len > 0) {
3398 page = addr & TARGET_PAGE_MASK;
3399 l = (page + TARGET_PAGE_SIZE) - addr;
3400 if (l > len)
3401 l = len;
06ef3525 3402 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3403
13eb76e0 3404 if (is_write) {
f3705d53 3405 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3406 target_phys_addr_t addr1;
cc5bea60 3407 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3408 /* XXX: could force cpu_single_env to NULL to avoid
3409 potential bugs */
6c2934db 3410 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3411 /* 32 bit write access */
c27004ec 3412 val = ldl_p(buf);
37ec01d4 3413 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3414 l = 4;
6c2934db 3415 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3416 /* 16 bit write access */
c27004ec 3417 val = lduw_p(buf);
37ec01d4 3418 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3419 l = 2;
3420 } else {
1c213d19 3421 /* 8 bit write access */
c27004ec 3422 val = ldub_p(buf);
37ec01d4 3423 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3424 l = 1;
3425 }
f3705d53 3426 } else if (!section->readonly) {
8ca5692d 3427 ram_addr_t addr1;
f3705d53 3428 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3429 + memory_region_section_addr(section, addr);
13eb76e0 3430 /* RAM case */
5579c7f3 3431 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3432 memcpy(ptr, buf, l);
3a7d929e
FB
3433 if (!cpu_physical_memory_is_dirty(addr1)) {
3434 /* invalidate code */
3435 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3436 /* set dirty bit */
f7c11b53
YT
3437 cpu_physical_memory_set_dirty_flags(
3438 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3439 }
050a0ddf 3440 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3441 }
3442 } else {
cc5bea60
BS
3443 if (!(memory_region_is_ram(section->mr) ||
3444 memory_region_is_romd(section->mr))) {
f1f6e3b8 3445 target_phys_addr_t addr1;
13eb76e0 3446 /* I/O case */
cc5bea60 3447 addr1 = memory_region_section_addr(section, addr);
6c2934db 3448 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3449 /* 32 bit read access */
37ec01d4 3450 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3451 stl_p(buf, val);
13eb76e0 3452 l = 4;
6c2934db 3453 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3454 /* 16 bit read access */
37ec01d4 3455 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3456 stw_p(buf, val);
13eb76e0
FB
3457 l = 2;
3458 } else {
1c213d19 3459 /* 8 bit read access */
37ec01d4 3460 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3461 stb_p(buf, val);
13eb76e0
FB
3462 l = 1;
3463 }
3464 } else {
3465 /* RAM case */
0a1b357f 3466 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3467 + memory_region_section_addr(section,
3468 addr));
f3705d53 3469 memcpy(buf, ptr, l);
050a0ddf 3470 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3471 }
3472 }
3473 len -= l;
3474 buf += l;
3475 addr += l;
3476 }
3477}
8df1cd07 3478
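/* Illustrative sketch (not part of exec.c): a device model doing a
 * small DMA-style store into guest-physical memory through the
 * cpu_physical_memory_write() convenience wrapper over the function
 * above (the wrapper comes from the public headers of this tree; the
 * function name and payload here are hypothetical). */
static void example_dma_write(target_phys_addr_t dst)
{
    uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write(dst, payload, sizeof(payload));
}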
d0ecd2aa 3479/* used for ROM loading : can write in RAM and ROM */
c227f099 3480void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3481 const uint8_t *buf, int len)
3482{
3483 int l;
3484 uint8_t *ptr;
c227f099 3485 target_phys_addr_t page;
f3705d53 3486 MemoryRegionSection *section;
3b46e624 3487
d0ecd2aa
FB
3488 while (len > 0) {
3489 page = addr & TARGET_PAGE_MASK;
3490 l = (page + TARGET_PAGE_SIZE) - addr;
3491 if (l > len)
3492 l = len;
06ef3525 3493 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3494
cc5bea60
BS
3495 if (!(memory_region_is_ram(section->mr) ||
3496 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3497 /* do nothing */
3498 } else {
3499 unsigned long addr1;
f3705d53 3500 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3501 + memory_region_section_addr(section, addr);
d0ecd2aa 3502 /* ROM/RAM case */
5579c7f3 3503 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3504 memcpy(ptr, buf, l);
050a0ddf 3505 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3506 }
3507 len -= l;
3508 buf += l;
3509 addr += l;
3510 }
3511}
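/* Editor's note: an illustrative sketch, not part of exec.c. A board model
   loading firmware would use the helper above because, unlike
   cpu_physical_memory_rw(), it also writes into read-only (ROM) regions.
   ROM_BASE, image and image_size are hypothetical. */
#if 0
    cpu_physical_memory_write_rom(ROM_BASE, image, image_size);
#endif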
3512
6d16c2f8
AL
3513typedef struct {
3514 void *buffer;
c227f099
AL
3515 target_phys_addr_t addr;
3516 target_phys_addr_t len;
6d16c2f8
AL
3517} BounceBuffer;
3518
3519static BounceBuffer bounce;
3520
ba223c29
AL
3521typedef struct MapClient {
3522 void *opaque;
3523 void (*callback)(void *opaque);
72cf2d4f 3524 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3525} MapClient;
3526
72cf2d4f
BS
3527static QLIST_HEAD(map_client_list, MapClient) map_client_list
3528 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3529
3530void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3531{
7267c094 3532 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3533
3534 client->opaque = opaque;
3535 client->callback = callback;
72cf2d4f 3536 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3537 return client;
3538}
3539
3540void cpu_unregister_map_client(void *_client)
3541{
3542 MapClient *client = (MapClient *)_client;
3543
72cf2d4f 3544 QLIST_REMOVE(client, link);
7267c094 3545 g_free(client);
ba223c29
AL
3546}
3547
3548static void cpu_notify_map_clients(void)
3549{
3550 MapClient *client;
3551
72cf2d4f
BS
3552 while (!QLIST_EMPTY(&map_client_list)) {
3553 client = QLIST_FIRST(&map_client_list);
ba223c29 3554 client->callback(client->opaque);
34d5e948 3555 cpu_unregister_map_client(client);
ba223c29
AL
3556 }
3557}
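/* Editor's note: an illustrative sketch of the map-client protocol, not part
   of exec.c. A device whose cpu_physical_memory_map() call fails because the
   single bounce buffer is in use can register a callback; it is invoked from
   cpu_notify_map_clients() once the buffer is released, and the core
   unregisters the client after the callback runs. MyDeviceState and
   my_device_start_dma() are hypothetical names. */
#if 0
static void my_device_map_retry(void *opaque)
{
    MyDeviceState *s = opaque;
    s->map_client = NULL;        /* freed by the core after this returns */
    my_device_start_dma(s);      /* retry now that resources were released */
}

/* at the point where mapping failed: */
    s->map_client = cpu_register_map_client(s, my_device_map_retry);
#endif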
3558
6d16c2f8
AL
3559/* Map a physical memory region into a host virtual address.
3560 * May map a subset of the requested range, given by and returned in *plen.
3561 * May return NULL if resources needed to perform the mapping are exhausted.
3562 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3563 * Use cpu_register_map_client() to know when retrying the map operation is
3564 * likely to succeed.
6d16c2f8 3565 */
c227f099
AL
3566void *cpu_physical_memory_map(target_phys_addr_t addr,
3567 target_phys_addr_t *plen,
6d16c2f8
AL
3568 int is_write)
3569{
c227f099 3570 target_phys_addr_t len = *plen;
38bee5dc 3571 target_phys_addr_t todo = 0;
6d16c2f8 3572 int l;
c227f099 3573 target_phys_addr_t page;
f3705d53 3574 MemoryRegionSection *section;
f15fbc4b 3575 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3576 ram_addr_t rlen;
3577 void *ret;
6d16c2f8
AL
3578
3579 while (len > 0) {
3580 page = addr & TARGET_PAGE_MASK;
3581 l = (page + TARGET_PAGE_SIZE) - addr;
3582 if (l > len)
3583 l = len;
06ef3525 3584 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3585
f3705d53 3586 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3587 if (todo || bounce.buffer) {
6d16c2f8
AL
3588 break;
3589 }
3590 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3591 bounce.addr = addr;
3592 bounce.len = l;
3593 if (!is_write) {
54f7b4a3 3594 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3595 }
38bee5dc
SS
3596
3597 *plen = l;
3598 return bounce.buffer;
6d16c2f8 3599 }
8ab934f9 3600 if (!todo) {
f3705d53 3601 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3602 + memory_region_section_addr(section, addr);
8ab934f9 3603 }
6d16c2f8
AL
3604
3605 len -= l;
3606 addr += l;
38bee5dc 3607 todo += l;
6d16c2f8 3608 }
8ab934f9
SS
3609 rlen = todo;
3610 ret = qemu_ram_ptr_length(raddr, &rlen);
3611 *plen = rlen;
3612 return ret;
6d16c2f8
AL
3613}
3614
3615/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3616 * Will also mark the memory as dirty if is_write == 1. access_len gives
3617 * the amount of memory that was actually read or written by the caller.
3618 */
c227f099
AL
3619void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3620 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3621{
3622 if (buffer != bounce.buffer) {
3623 if (is_write) {
e890261f 3624 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3625 while (access_len) {
3626 unsigned l;
3627 l = TARGET_PAGE_SIZE;
3628 if (l > access_len)
3629 l = access_len;
3630 if (!cpu_physical_memory_is_dirty(addr1)) {
3631 /* invalidate code */
3632 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3633 /* set dirty bit */
f7c11b53
YT
3634 cpu_physical_memory_set_dirty_flags(
3635 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3636 }
3637 addr1 += l;
3638 access_len -= l;
3639 }
3640 }
868bb33f 3641 if (xen_enabled()) {
e41d7c69 3642 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3643 }
6d16c2f8
AL
3644 return;
3645 }
3646 if (is_write) {
3647 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3648 }
f8a83245 3649 qemu_vfree(bounce.buffer);
6d16c2f8 3650 bounce.buffer = NULL;
ba223c29 3651 cpu_notify_map_clients();
6d16c2f8 3652}
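/* Editor's note: the canonical map/unmap pattern (an illustrative sketch,
   not part of exec.c). Mapping may cover less than was asked for, e.g. when
   it falls back to the single bounce buffer, so *plen must be re-checked and
   the transfer chunked. fill_buffer_somehow() is a hypothetical stand-in for
   the device's real work. */
#if 0
static void dma_write_example(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
        if (!ptr) {
            /* resources exhausted: register a map client and retry later */
            return;
        }
        fill_buffer_somehow(ptr, plen);
        cpu_physical_memory_unmap(ptr, plen, 1 /* is_write */, plen);
        addr += plen;
        len  -= plen;
    }
}
#endif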
d0ecd2aa 3653
8df1cd07 3654/* warning: addr must be aligned */
1e78bcc1
AG
3655static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3656 enum device_endian endian)
8df1cd07 3657{
8df1cd07
FB
3658 uint8_t *ptr;
3659 uint32_t val;
f3705d53 3660 MemoryRegionSection *section;
8df1cd07 3661
06ef3525 3662 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3663
cc5bea60
BS
3664 if (!(memory_region_is_ram(section->mr) ||
3665 memory_region_is_romd(section->mr))) {
8df1cd07 3666 /* I/O case */
cc5bea60 3667 addr = memory_region_section_addr(section, addr);
37ec01d4 3668 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3669#if defined(TARGET_WORDS_BIGENDIAN)
3670 if (endian == DEVICE_LITTLE_ENDIAN) {
3671 val = bswap32(val);
3672 }
3673#else
3674 if (endian == DEVICE_BIG_ENDIAN) {
3675 val = bswap32(val);
3676 }
3677#endif
8df1cd07
FB
3678 } else {
3679 /* RAM case */
f3705d53 3680 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3681 & TARGET_PAGE_MASK)
cc5bea60 3682 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3683 switch (endian) {
3684 case DEVICE_LITTLE_ENDIAN:
3685 val = ldl_le_p(ptr);
3686 break;
3687 case DEVICE_BIG_ENDIAN:
3688 val = ldl_be_p(ptr);
3689 break;
3690 default:
3691 val = ldl_p(ptr);
3692 break;
3693 }
8df1cd07
FB
3694 }
3695 return val;
3696}
3697
1e78bcc1
AG
3698uint32_t ldl_phys(target_phys_addr_t addr)
3699{
3700 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3701}
3702
3703uint32_t ldl_le_phys(target_phys_addr_t addr)
3704{
3705 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3706}
3707
3708uint32_t ldl_be_phys(target_phys_addr_t addr)
3709{
3710 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3711}
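/* Editor's note: illustrative use of the endianness-explicit loads above,
   not part of exec.c; the addresses are hypothetical device registers. */
#if 0
    uint32_t status = ldl_le_phys(0x10001000);  /* little-endian register  */
    uint32_t fwword = ldl_be_phys(0x10002000);  /* big-endian firmware word */
    uint32_t native = ldl_phys(0x10003000);     /* target-native byte order */
#endif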
3712
84b7b8e7 3713/* warning: addr must be aligned */
1e78bcc1
AG
3714static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3715 enum device_endian endian)
84b7b8e7 3716{
84b7b8e7
FB
3717 uint8_t *ptr;
3718 uint64_t val;
f3705d53 3719 MemoryRegionSection *section;
84b7b8e7 3720
06ef3525 3721 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3722
cc5bea60
BS
3723 if (!(memory_region_is_ram(section->mr) ||
3724 memory_region_is_romd(section->mr))) {
84b7b8e7 3725 /* I/O case */
cc5bea60 3726 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3727
3728 /* XXX: This is broken when device endian != cpu endian.
3729 Fix this by honouring the "endian" argument. */
84b7b8e7 3730#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3731 val = io_mem_read(section->mr, addr, 4) << 32;
3732 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3733#else
37ec01d4
AK
3734 val = io_mem_read(section->mr, addr, 4);
3735 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3736#endif
3737 } else {
3738 /* RAM case */
f3705d53 3739 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3740 & TARGET_PAGE_MASK)
cc5bea60 3741 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3742 switch (endian) {
3743 case DEVICE_LITTLE_ENDIAN:
3744 val = ldq_le_p(ptr);
3745 break;
3746 case DEVICE_BIG_ENDIAN:
3747 val = ldq_be_p(ptr);
3748 break;
3749 default:
3750 val = ldq_p(ptr);
3751 break;
3752 }
84b7b8e7
FB
3753 }
3754 return val;
3755}
3756
1e78bcc1
AG
3757uint64_t ldq_phys(target_phys_addr_t addr)
3758{
3759 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3760}
3761
3762uint64_t ldq_le_phys(target_phys_addr_t addr)
3763{
3764 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3765}
3766
3767uint64_t ldq_be_phys(target_phys_addr_t addr)
3768{
3769 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3770}
3771
aab33094 3772/* XXX: optimize */
c227f099 3773uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
3774{
3775 uint8_t val;
3776 cpu_physical_memory_read(addr, &val, 1);
3777 return val;
3778}
3779
733f0b02 3780/* warning: addr must be aligned */
1e78bcc1
AG
3781static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3782 enum device_endian endian)
aab33094 3783{
733f0b02
MT
3784 uint8_t *ptr;
3785 uint64_t val;
f3705d53 3786 MemoryRegionSection *section;
733f0b02 3787
06ef3525 3788 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3789
cc5bea60
BS
3790 if (!(memory_region_is_ram(section->mr) ||
3791 memory_region_is_romd(section->mr))) {
733f0b02 3792 /* I/O case */
cc5bea60 3793 addr = memory_region_section_addr(section, addr);
37ec01d4 3794 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3795#if defined(TARGET_WORDS_BIGENDIAN)
3796 if (endian == DEVICE_LITTLE_ENDIAN) {
3797 val = bswap16(val);
3798 }
3799#else
3800 if (endian == DEVICE_BIG_ENDIAN) {
3801 val = bswap16(val);
3802 }
3803#endif
733f0b02
MT
3804 } else {
3805 /* RAM case */
f3705d53 3806 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3807 & TARGET_PAGE_MASK)
cc5bea60 3808 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3809 switch (endian) {
3810 case DEVICE_LITTLE_ENDIAN:
3811 val = lduw_le_p(ptr);
3812 break;
3813 case DEVICE_BIG_ENDIAN:
3814 val = lduw_be_p(ptr);
3815 break;
3816 default:
3817 val = lduw_p(ptr);
3818 break;
3819 }
733f0b02
MT
3820 }
3821 return val;
aab33094
FB
3822}
3823
1e78bcc1
AG
3824uint32_t lduw_phys(target_phys_addr_t addr)
3825{
3826 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3827}
3828
3829uint32_t lduw_le_phys(target_phys_addr_t addr)
3830{
3831 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3832}
3833
3834uint32_t lduw_be_phys(target_phys_addr_t addr)
3835{
3836 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3837}
3838
8df1cd07
FB
3839/* warning: addr must be aligned. The RAM page is not marked as dirty
3840 and the code inside is not invalidated. This is useful when the dirty
3841 bits are used to track modified PTEs */
c227f099 3842void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3843{
8df1cd07 3844 uint8_t *ptr;
f3705d53 3845 MemoryRegionSection *section;
8df1cd07 3846
06ef3525 3847 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3848
f3705d53 3849 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3850 addr = memory_region_section_addr(section, addr);
f3705d53 3851 if (memory_region_is_ram(section->mr)) {
37ec01d4 3852 section = &phys_sections[phys_section_rom];
06ef3525 3853 }
37ec01d4 3854 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3855 } else {
f3705d53 3856 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3857 & TARGET_PAGE_MASK)
cc5bea60 3858 + memory_region_section_addr(section, addr);
5579c7f3 3859 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3860 stl_p(ptr, val);
74576198
AL
3861
3862 if (unlikely(in_migration)) {
3863 if (!cpu_physical_memory_is_dirty(addr1)) {
3864 /* invalidate code */
3865 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3866 /* set dirty bit */
f7c11b53
YT
3867 cpu_physical_memory_set_dirty_flags(
3868 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3869 }
3870 }
8df1cd07
FB
3871 }
3872}
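/* Editor's note: a sketch of the intended use, not part of exec.c. An MMU
   emulation that writes accessed/dirty flags back into a guest PTE uses the
   _notdirty variant so that this bookkeeping write does not itself mark the
   PTE's page dirty. pte_addr, pte and PG_ACCESSED_MASK are hypothetical
   here (the x86 target has equivalents). */
#if 0
    pte |= PG_ACCESSED_MASK;
    stl_phys_notdirty(pte_addr, pte);
#endif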
3873
c227f099 3874void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3875{
bc98a7ef 3876 uint8_t *ptr;
f3705d53 3877 MemoryRegionSection *section;
bc98a7ef 3878
06ef3525 3879 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3880
f3705d53 3881 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3882 addr = memory_region_section_addr(section, addr);
f3705d53 3883 if (memory_region_is_ram(section->mr)) {
37ec01d4 3884 section = &phys_sections[phys_section_rom];
06ef3525 3885 }
bc98a7ef 3886#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3887 io_mem_write(section->mr, addr, val >> 32, 4);
3888 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 3889#else
37ec01d4
AK
3890 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3891 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
3892#endif
3893 } else {
f3705d53 3894 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3895 & TARGET_PAGE_MASK)
cc5bea60 3896 + memory_region_section_addr(section, addr));
bc98a7ef
JM
3897 stq_p(ptr, val);
3898 }
3899}
3900
8df1cd07 3901/* warning: addr must be aligned */
1e78bcc1
AG
3902static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3903 enum device_endian endian)
8df1cd07 3904{
8df1cd07 3905 uint8_t *ptr;
f3705d53 3906 MemoryRegionSection *section;
8df1cd07 3907
06ef3525 3908 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 3909
f3705d53 3910 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3911 addr = memory_region_section_addr(section, addr);
f3705d53 3912 if (memory_region_is_ram(section->mr)) {
37ec01d4 3913 section = &phys_sections[phys_section_rom];
06ef3525 3914 }
1e78bcc1
AG
3915#if defined(TARGET_WORDS_BIGENDIAN)
3916 if (endian == DEVICE_LITTLE_ENDIAN) {
3917 val = bswap32(val);
3918 }
3919#else
3920 if (endian == DEVICE_BIG_ENDIAN) {
3921 val = bswap32(val);
3922 }
3923#endif
37ec01d4 3924 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
3925 } else {
3926 unsigned long addr1;
f3705d53 3927 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 3928 + memory_region_section_addr(section, addr);
8df1cd07 3929 /* RAM case */
5579c7f3 3930 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3931 switch (endian) {
3932 case DEVICE_LITTLE_ENDIAN:
3933 stl_le_p(ptr, val);
3934 break;
3935 case DEVICE_BIG_ENDIAN:
3936 stl_be_p(ptr, val);
3937 break;
3938 default:
3939 stl_p(ptr, val);
3940 break;
3941 }
3a7d929e
FB
3942 if (!cpu_physical_memory_is_dirty(addr1)) {
3943 /* invalidate code */
3944 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3945 /* set dirty bit */
f7c11b53
YT
3946 cpu_physical_memory_set_dirty_flags(addr1,
3947 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3948 }
8df1cd07
FB
3949 }
3950}
3951
1e78bcc1
AG
3952void stl_phys(target_phys_addr_t addr, uint32_t val)
3953{
3954 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3955}
3956
3957void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3958{
3959 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3960}
3961
3962void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3963{
3964 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3965}
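/* Editor's note: store-side counterparts of the ldl_*_phys accessors, shown
   as an illustrative fragment (not part of exec.c; the addresses are
   hypothetical). Unlike stl_phys_notdirty(), these invalidate translated
   code in the page and set its dirty bits. */
#if 0
    stl_le_phys(0x10001004, 1);           /* ring a little-endian doorbell */
    stl_be_phys(0x10002004, 0xcafef00d);  /* big-endian scratch word */
#endif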
3966
aab33094 3967/* XXX: optimize */
c227f099 3968void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
3969{
3970 uint8_t v = val;
3971 cpu_physical_memory_write(addr, &v, 1);
3972}
3973
733f0b02 3974/* warning: addr must be aligned */
1e78bcc1
AG
3975static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
3976 enum device_endian endian)
aab33094 3977{
733f0b02 3978 uint8_t *ptr;
f3705d53 3979 MemoryRegionSection *section;
733f0b02 3980
06ef3525 3981 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 3982
f3705d53 3983 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3984 addr = memory_region_section_addr(section, addr);
f3705d53 3985 if (memory_region_is_ram(section->mr)) {
37ec01d4 3986 section = &phys_sections[phys_section_rom];
06ef3525 3987 }
1e78bcc1
AG
3988#if defined(TARGET_WORDS_BIGENDIAN)
3989 if (endian == DEVICE_LITTLE_ENDIAN) {
3990 val = bswap16(val);
3991 }
3992#else
3993 if (endian == DEVICE_BIG_ENDIAN) {
3994 val = bswap16(val);
3995 }
3996#endif
37ec01d4 3997 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
3998 } else {
3999 unsigned long addr1;
f3705d53 4000 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4001 + memory_region_section_addr(section, addr);
733f0b02
MT
4002 /* RAM case */
4003 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4004 switch (endian) {
4005 case DEVICE_LITTLE_ENDIAN:
4006 stw_le_p(ptr, val);
4007 break;
4008 case DEVICE_BIG_ENDIAN:
4009 stw_be_p(ptr, val);
4010 break;
4011 default:
4012 stw_p(ptr, val);
4013 break;
4014 }
733f0b02
MT
4015 if (!cpu_physical_memory_is_dirty(addr1)) {
4016 /* invalidate code */
4017 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4018 /* set dirty bit */
4019 cpu_physical_memory_set_dirty_flags(addr1,
4020 (0xff & ~CODE_DIRTY_FLAG));
4021 }
4022 }
aab33094
FB
4023}
4024
1e78bcc1
AG
4025void stw_phys(target_phys_addr_t addr, uint32_t val)
4026{
4027 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4028}
4029
4030void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4031{
4032 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4033}
4034
4035void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4036{
4037 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4038}
4039
aab33094 4040/* XXX: optimize */
c227f099 4041void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4042{
4043 val = tswap64(val);
71d2b725 4044 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4045}
4046
1e78bcc1
AG
4047void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4048{
4049 val = cpu_to_le64(val);
4050 cpu_physical_memory_write(addr, &val, 8);
4051}
4052
4053void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4054{
4055 val = cpu_to_be64(val);
4056 cpu_physical_memory_write(addr, &val, 8);
4057}
4058
5e2972fd 4059/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4060int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4061 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4062{
4063 int l;
c227f099 4064 target_phys_addr_t phys_addr;
9b3c35e0 4065 target_ulong page;
13eb76e0
FB
4066
4067 while (len > 0) {
4068 page = addr & TARGET_PAGE_MASK;
4069 phys_addr = cpu_get_phys_page_debug(env, page);
4070 /* if no physical page mapped, return an error */
4071 if (phys_addr == -1)
4072 return -1;
4073 l = (page + TARGET_PAGE_SIZE) - addr;
4074 if (l > len)
4075 l = len;
5e2972fd 4076 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4077 if (is_write)
4078 cpu_physical_memory_write_rom(phys_addr, buf, l);
4079 else
5e2972fd 4080 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4081 len -= l;
4082 buf += l;
4083 addr += l;
4084 }
4085 return 0;
4086}
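/* Editor's note: an illustrative gdbstub-style fragment, not part of exec.c.
   The walk goes through cpu_get_phys_page_debug(), so it follows the guest
   page tables without raising faults, and writes take the ROM path so a
   debugger can plant breakpoints even in ROM. env and pc are assumed to be
   in scope. */
#if 0
    uint8_t insn[4];
    if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
        /* no physical page mapped at pc: report an error to the debugger */
    }
#endif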
a68fe89c 4087#endif
13eb76e0 4088
2e70f6ef
PB
4089/* In deterministic execution mode, instructions that perform device I/O
4090 must be at the end of the TB */
20503968 4091void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4092{
4093 TranslationBlock *tb;
4094 uint32_t n, cflags;
4095 target_ulong pc, cs_base;
4096 uint64_t flags;
4097
20503968 4098 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4099 if (!tb) {
4100 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4101 (void *)retaddr);
2e70f6ef
PB
4102 }
4103 n = env->icount_decr.u16.low + tb->icount;
20503968 4104 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4105 /* Calculate how many instructions had been executed before the fault
bf20dc07 4106 occurred. */
2e70f6ef
PB
4107 n = n - env->icount_decr.u16.low;
4108 /* Generate a new TB ending on the I/O insn. */
4109 n++;
4110 /* On MIPS and SH, delay slot instructions can only be restarted if
4111 they were already the first instruction in the TB. If this is not
bf20dc07 4112 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4113 branch. */
4114#if defined(TARGET_MIPS)
4115 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4116 env->active_tc.PC -= 4;
4117 env->icount_decr.u16.low++;
4118 env->hflags &= ~MIPS_HFLAG_BMASK;
4119 }
4120#elif defined(TARGET_SH4)
4121 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4122 && n > 1) {
4123 env->pc -= 2;
4124 env->icount_decr.u16.low++;
4125 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4126 }
4127#endif
4128 /* This should never happen. */
4129 if (n > CF_COUNT_MASK)
4130 cpu_abort(env, "TB too big during recompile");
4131
4132 cflags = n | CF_LAST_IO;
4133 pc = tb->pc;
4134 cs_base = tb->cs_base;
4135 flags = tb->flags;
4136 tb_phys_invalidate(tb, -1);
4137 /* FIXME: In theory this could raise an exception. In practice
4138 we have already translated the block once so it's probably ok. */
4139 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4140 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4141 the first in the TB) then we end up generating a whole new TB and
4142 repeating the fault, which is horribly inefficient.
4143 Better would be to execute just this insn uncached, or generate a
4144 second new TB. */
4145 cpu_resume_from_signal(env, NULL);
4146}
4147
b3755a91
PB
4148#if !defined(CONFIG_USER_ONLY)
4149
055403b2 4150void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4151{
4152 int i, target_code_size, max_target_code_size;
4153 int direct_jmp_count, direct_jmp2_count, cross_page;
4154 TranslationBlock *tb;
3b46e624 4155
e3db7226
FB
4156 target_code_size = 0;
4157 max_target_code_size = 0;
4158 cross_page = 0;
4159 direct_jmp_count = 0;
4160 direct_jmp2_count = 0;
4161 for(i = 0; i < nb_tbs; i++) {
4162 tb = &tbs[i];
4163 target_code_size += tb->size;
4164 if (tb->size > max_target_code_size)
4165 max_target_code_size = tb->size;
4166 if (tb->page_addr[1] != -1)
4167 cross_page++;
4168 if (tb->tb_next_offset[0] != 0xffff) {
4169 direct_jmp_count++;
4170 if (tb->tb_next_offset[1] != 0xffff) {
4171 direct_jmp2_count++;
4172 }
4173 }
4174 }
4175 /* XXX: avoid using doubles? */
57fec1fe 4176 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4177 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4178 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4179 cpu_fprintf(f, "TB count %d/%d\n",
4180 nb_tbs, code_gen_max_blocks);
5fafdf24 4181 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4182 nb_tbs ? target_code_size / nb_tbs : 0,
4183 max_target_code_size);
055403b2 4184 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4185 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4186 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4187 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4188 cross_page,
e3db7226
FB
4189 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4190 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4191 direct_jmp_count,
e3db7226
FB
4192 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4193 direct_jmp2_count,
4194 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4195 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4196 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4197 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4198 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4199 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4200}
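/* Editor's note: an illustrative call, not part of exec.c. The monitor's
   "info jit" command ends up here; plain fprintf matches fprintf_function: */
#if 0
    dump_exec_info(stderr, fprintf);
#endif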
4201
82afa586
BH
4202/*
4203 * A helper function for the _utterly broken_ virtio device model to find out
4204 * whether it's running on a big-endian machine. Don't do this at home, kids!
4205 */
4206bool virtio_is_big_endian(void);
4207bool virtio_is_big_endian(void)
4208{
4209#if defined(TARGET_WORDS_BIGENDIAN)
4210 return true;
4211#else
4212 return false;
4213#endif
4214}
4215
61382a50 4216#endif
76f35538
WC
4217
4218#ifndef CONFIG_USER_ONLY
4219bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4220{
4221 MemoryRegionSection *section;
4222
4223 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4224
4225 return !(memory_region_is_ram(section->mr) ||
4226 memory_region_is_romd(section->mr));
4227}
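/* Editor's note: an illustrative check, not part of exec.c; phys_addr is
   hypothetical. Callers such as a memory-dump path can use this to skip
   addresses with no stable backing memory: */
#if 0
    if (cpu_physical_memory_is_io(phys_addr)) {
        /* MMIO: reading would have side effects, skip it */
    }
#endif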
4228#endif