/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

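/* Make a range of host memory executable: VirtualProtect() on Windows,
   mprotect() elsewhere (the POSIX variant rounds the range out to host
   page boundaries). */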
#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

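/* Determine the host page size/mask and, for BSD user-mode emulation,
   mark the pages already used by host mappings as PAGE_RESERVED. */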
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

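/* Look up the PageDesc for a guest page index in the multi-level l1_map.
   Intermediate levels are allocated on demand when 'alloc' is non-zero;
   otherwise NULL is returned if the page has no descriptor yet. */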
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}


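/* Recursive worker for phys_page_set(): fill [*index, *index + *nb) with
   'leaf', allocating intermediate nodes on demand and storing a leaf
   directly at this level whenever an aligned, sufficiently large chunk
   remains. */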
static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

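/* Walk the dispatch tree from the root and return the MemoryRegionSection
   covering page 'index'; addresses with no mapping resolve to the
   unassigned section. */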
MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used. */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

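/* Allocate the translation buffer and the TB descriptor array; the buffer
   size comes from 'tb_size' (or the per-host default when zero) and the
   last 1024 bytes are reserved for the TCG prologue. */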
static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

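/* Return the CPU whose cpu_index matches 'cpu' by scanning the global
   CPU list, or NULL if no such CPU exists. */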
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

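/* Register a newly created CPU: append it to the global list, give it the
   next free cpu_index and, in system mode, record the host thread id and
   (when built with CPU_SAVE_VERSION) hook up the common vmstate/savevm
   handlers. */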
void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

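/* Remove a TB from every structure that references it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache, and the jump
   chains of any TB that branches into it. */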
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

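/* Set bits [start, start + len) in the byte-granular bitmap 'tab'. */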
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

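/* Rebuild the page's code bitmap: mark the byte range covered by every TB
   that intersects this page (a TB may span two physical pages, hence the
   two cases below). */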
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

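/* Translate a new TB for (pc, cs_base, flags). If the TB pool or the code
   buffer is exhausted, flush everything and retry, then link the result
   into the physical page tables. */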
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip +
                  (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
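/* User-mode only: invalidate every TB on the page containing 'addr'. When
   'pc' is non-zero it identifies the TB currently executing, so that with
   precise SMC support the faulting instruction can be regenerated and
   execution resumed through the signal context 'puc'. */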
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, code_gen_buffer_max_size
       is used instead of code_gen_ptr for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

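/* If 'tb' is chained to another TB through jump slot 'n', unlink it from
   that TB's circular list of incoming jumps, reset the jump in the
   generated code, and recurse into the TB it pointed to. */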
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

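/* Detach the CPU from the TB it is currently executing so that control
   returns to the main loop; the jump chains of that TB are reset under
   interrupt_lock. */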
9349b4f9 1691static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1692{
3098dba0
AJ
1693 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1694 problem and hope the cpu will stop of its own accord. For userspace
1695 emulation this often isn't actually as bad as it sounds. Often
1696 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1697 TranslationBlock *tb;
c227f099 1698 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1699
cab1b4bd 1700 spin_lock(&interrupt_lock);
3098dba0
AJ
1701 tb = env->current_tb;
1702 /* if the cpu is currently executing code, we must unlink it and
1703 all the potentially executing TB */
f76cfe56 1704 if (tb) {
3098dba0
AJ
1705 env->current_tb = NULL;
1706 tb_reset_jump_recursive(tb);
be214e6c 1707 }
cab1b4bd 1708 spin_unlock(&interrupt_lock);
3098dba0
AJ
1709}
1710
97ffbd8d 1711#ifndef CONFIG_USER_ONLY
3098dba0 1712/* mask must never be zero, except for A20 change call */
9349b4f9 1713static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0 1714{
60e82579 1715 CPUState *cpu = ENV_GET_CPU(env);
3098dba0 1716 int old_mask;
be214e6c 1717
2e70f6ef 1718 old_mask = env->interrupt_request;
68a79315 1719 env->interrupt_request |= mask;
3098dba0 1720
8edac960
AL
1721 /*
1722 * If called from iothread context, wake the target cpu in
 1723 * case it's halted.
1724 */
60e82579 1725 if (!qemu_cpu_is_self(cpu)) {
c08d7424 1726 qemu_cpu_kick(cpu);
8edac960
AL
1727 return;
1728 }
8edac960 1729
2e70f6ef 1730 if (use_icount) {
266910c4 1731 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1732 if (!can_do_io(env)
be214e6c 1733 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1734 cpu_abort(env, "Raised interrupt while not in I/O function");
1735 }
2e70f6ef 1736 } else {
3098dba0 1737 cpu_unlink_tb(env);
ea041c0e
FB
1738 }
1739}
1740
ec6959d0
JK
1741CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1742
97ffbd8d
JK
1743#else /* CONFIG_USER_ONLY */
1744
9349b4f9 1745void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1746{
1747 env->interrupt_request |= mask;
1748 cpu_unlink_tb(env);
1749}
1750#endif /* CONFIG_USER_ONLY */
1751
9349b4f9 1752void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1753{
1754 env->interrupt_request &= ~mask;
1755}
1756
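/* Illustrative sketch, not part of exec.c: a hypothetical interrupt
 * controller model raising and lowering the external interrupt line.
 * CPU_INTERRUPT_HARD is the mask most targets use for external IRQs; other
 * interrupt_request bits follow the same raise/clear pattern. */
static void example_set_irq_level(CPUArchState *env, int level)
{
    if (level) {
        cpu_interrupt(env, CPU_INTERRUPT_HARD);
    } else {
        cpu_reset_interrupt(env, CPU_INTERRUPT_HARD);
    }
}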
9349b4f9 1757void cpu_exit(CPUArchState *env)
3098dba0
AJ
1758{
1759 env->exit_request = 1;
1760 cpu_unlink_tb(env);
1761}
1762
9349b4f9 1763void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1764{
1765 va_list ap;
493ae1f0 1766 va_list ap2;
7501267e
FB
1767
1768 va_start(ap, fmt);
493ae1f0 1769 va_copy(ap2, ap);
7501267e
FB
1770 fprintf(stderr, "qemu: fatal: ");
1771 vfprintf(stderr, fmt, ap);
1772 fprintf(stderr, "\n");
6fd2a026 1773 cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
93fcfe39
AL
1774 if (qemu_log_enabled()) {
1775 qemu_log("qemu: fatal: ");
1776 qemu_log_vprintf(fmt, ap2);
1777 qemu_log("\n");
6fd2a026 1778 log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
31b1a7b4 1779 qemu_log_flush();
93fcfe39 1780 qemu_log_close();
924edcae 1781 }
493ae1f0 1782 va_end(ap2);
f9373291 1783 va_end(ap);
fd052bf6
RV
1784#if defined(CONFIG_USER_ONLY)
1785 {
1786 struct sigaction act;
1787 sigfillset(&act.sa_mask);
1788 act.sa_handler = SIG_DFL;
1789 sigaction(SIGABRT, &act, NULL);
1790 }
1791#endif
7501267e
FB
1792 abort();
1793}
1794
9349b4f9 1795CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1796{
9349b4f9
AF
1797 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1798 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1799 int cpu_index = new_env->cpu_index;
5a38f081
AL
1800#if defined(TARGET_HAS_ICE)
1801 CPUBreakpoint *bp;
1802 CPUWatchpoint *wp;
1803#endif
1804
9349b4f9 1805 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1806
1807 /* Preserve chaining and index. */
c5be9f08
TS
1808 new_env->next_cpu = next_cpu;
1809 new_env->cpu_index = cpu_index;
5a38f081
AL
1810
1811 /* Clone all break/watchpoints.
1812 Note: Once we support ptrace with hw-debug register access, make sure
1813 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1814 QTAILQ_INIT(&env->breakpoints);
1815 QTAILQ_INIT(&env->watchpoints);
5a38f081 1816#if defined(TARGET_HAS_ICE)
72cf2d4f 1817 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1818 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1819 }
72cf2d4f 1820 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1821 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1822 wp->flags, NULL);
1823 }
1824#endif
1825
c5be9f08
TS
1826 return new_env;
1827}
1828
0124311e 1829#if !defined(CONFIG_USER_ONLY)
0cac1b66 1830void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1831{
1832 unsigned int i;
1833
1834 /* Discard jump cache entries for any tb which might potentially
1835 overlap the flushed page. */
1836 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1837 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1838 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1839
1840 i = tb_jmp_cache_hash_page(addr);
1841 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1842 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1843}
1844
d24981d3
JQ
1845static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1846 uintptr_t length)
1847{
1848 uintptr_t start1;
1849
1850 /* we modify the TLB cache so that the dirty bit will be set again
1851 when accessing the range */
1852 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
1853 /* Check that we don't span multiple blocks - this breaks the
1854 address comparisons below. */
1855 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
1856 != (end - 1) - start) {
1857 abort();
1858 }
1859 cpu_tlb_reset_dirty_all(start1, length);
1860
1861}
1862
5579c7f3 1863/* Note: start and end must be within the same ram block. */
c227f099 1864void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1865 int dirty_flags)
1ccde1cb 1866{
d24981d3 1867 uintptr_t length;
1ccde1cb
FB
1868
1869 start &= TARGET_PAGE_MASK;
1870 end = TARGET_PAGE_ALIGN(end);
1871
1872 length = end - start;
1873 if (length == 0)
1874 return;
f7c11b53 1875 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1876
d24981d3
JQ
1877 if (tcg_enabled()) {
1878 tlb_reset_dirty_range_all(start, end, length);
5579c7f3 1879 }
1ccde1cb
FB
1880}
1881
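/* Illustrative sketch, not part of exec.c: how a hypothetical display device
 * might consume the dirty log.  It assumes the VGA_DIRTY_FLAG bit and the
 * cpu_physical_memory_get_dirty() helper from the dirty-bitmap headers used
 * elsewhere in this file; the scanning loop itself is made up. */
static void example_refresh_vram(ram_addr_t vram_base, ram_addr_t vram_size)
{
    ram_addr_t addr;

    for (addr = vram_base; addr < vram_base + vram_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this guest page ... */
        }
    }
    /* Clear the VGA dirty bit for the whole range before the next refresh. */
    cpu_physical_memory_reset_dirty(vram_base, vram_base + vram_size,
                                    VGA_DIRTY_FLAG);
}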
8b9c99d9 1882static int cpu_physical_memory_set_dirty_tracking(int enable)
74576198 1883{
f6f3fbca 1884 int ret = 0;
74576198 1885 in_migration = enable;
f6f3fbca 1886 return ret;
74576198
AL
1887}
1888
a8170e5e 1889hwaddr memory_region_section_get_iotlb(CPUArchState *env,
e5548617
BS
1890 MemoryRegionSection *section,
1891 target_ulong vaddr,
a8170e5e 1892 hwaddr paddr,
e5548617
BS
1893 int prot,
1894 target_ulong *address)
1895{
a8170e5e 1896 hwaddr iotlb;
e5548617
BS
1897 CPUWatchpoint *wp;
1898
cc5bea60 1899 if (memory_region_is_ram(section->mr)) {
e5548617
BS
1900 /* Normal RAM. */
1901 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 1902 + memory_region_section_addr(section, paddr);
e5548617
BS
1903 if (!section->readonly) {
1904 iotlb |= phys_section_notdirty;
1905 } else {
1906 iotlb |= phys_section_rom;
1907 }
1908 } else {
1909 /* IO handlers are currently passed a physical address.
1910 It would be nice to pass an offset from the base address
1911 of that region. This would avoid having to special case RAM,
1912 and avoid full address decoding in every device.
1913 We can't use the high bits of pd for this because
1914 IO_MEM_ROMD uses these as a ram address. */
1915 iotlb = section - phys_sections;
cc5bea60 1916 iotlb += memory_region_section_addr(section, paddr);
e5548617
BS
1917 }
1918
1919 /* Make accesses to pages with watchpoints go via the
1920 watchpoint trap routines. */
1921 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1922 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1923 /* Avoid trapping reads of pages with a write breakpoint. */
1924 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1925 iotlb = phys_section_watch + paddr;
1926 *address |= TLB_MMIO;
1927 break;
1928 }
1929 }
1930 }
1931
1932 return iotlb;
1933}
1934
0124311e 1935#else
edf8e2af
MW
1936/*
1937 * Walks guest process memory "regions" one by one
1938 * and calls callback function 'fn' for each region.
1939 */
5cd2c5b6
RH
1940
1941struct walk_memory_regions_data
1942{
1943 walk_memory_regions_fn fn;
1944 void *priv;
8efe0ca8 1945 uintptr_t start;
5cd2c5b6
RH
1946 int prot;
1947};
1948
1949static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 1950 abi_ulong end, int new_prot)
5cd2c5b6
RH
1951{
1952 if (data->start != -1ul) {
1953 int rc = data->fn(data->priv, data->start, end, data->prot);
1954 if (rc != 0) {
1955 return rc;
1956 }
1957 }
1958
1959 data->start = (new_prot ? end : -1ul);
1960 data->prot = new_prot;
1961
1962 return 0;
1963}
1964
1965static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 1966 abi_ulong base, int level, void **lp)
5cd2c5b6 1967{
b480d9b7 1968 abi_ulong pa;
5cd2c5b6
RH
1969 int i, rc;
1970
1971 if (*lp == NULL) {
1972 return walk_memory_regions_end(data, base, 0);
1973 }
1974
1975 if (level == 0) {
1976 PageDesc *pd = *lp;
7296abac 1977 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1978 int prot = pd[i].flags;
1979
1980 pa = base | (i << TARGET_PAGE_BITS);
1981 if (prot != data->prot) {
1982 rc = walk_memory_regions_end(data, pa, prot);
1983 if (rc != 0) {
1984 return rc;
9fa3e853 1985 }
9fa3e853 1986 }
5cd2c5b6
RH
1987 }
1988 } else {
1989 void **pp = *lp;
7296abac 1990 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
1991 pa = base | ((abi_ulong)i <<
1992 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
1993 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1994 if (rc != 0) {
1995 return rc;
1996 }
1997 }
1998 }
1999
2000 return 0;
2001}
2002
2003int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2004{
2005 struct walk_memory_regions_data data;
8efe0ca8 2006 uintptr_t i;
5cd2c5b6
RH
2007
2008 data.fn = fn;
2009 data.priv = priv;
2010 data.start = -1ul;
2011 data.prot = 0;
2012
2013 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2014 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2015 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2016 if (rc != 0) {
2017 return rc;
9fa3e853 2018 }
33417e70 2019 }
5cd2c5b6
RH
2020
2021 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2022}
2023
b480d9b7
PB
2024static int dump_region(void *priv, abi_ulong start,
2025 abi_ulong end, unsigned long prot)
edf8e2af
MW
2026{
2027 FILE *f = (FILE *)priv;
2028
b480d9b7
PB
2029 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2030 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2031 start, end, end - start,
2032 ((prot & PAGE_READ) ? 'r' : '-'),
2033 ((prot & PAGE_WRITE) ? 'w' : '-'),
2034 ((prot & PAGE_EXEC) ? 'x' : '-'));
2035
2036 return (0);
2037}
2038
2039/* dump memory mappings */
2040void page_dump(FILE *f)
2041{
2042 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2043 "start", "end", "size", "prot");
2044 walk_memory_regions(f, dump_region);
33417e70
FB
2045}
2046
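/* Illustrative sketch, not part of exec.c: a custom walk_memory_regions()
 * callback in the same style as dump_region() above, here just counting how
 * many bytes the guest currently has mapped executable.  Usage would be:
 * abi_ulong n = 0; walk_memory_regions(&n, example_count_exec); */
static int example_count_exec(void *priv, abi_ulong start,
                              abi_ulong end, unsigned long prot)
{
    abi_ulong *total = priv;

    if (prot & PAGE_EXEC) {
        *total += end - start;
    }
    return 0; /* returning non-zero would stop the walk early */
}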
53a5960a 2047int page_get_flags(target_ulong address)
33417e70 2048{
9fa3e853
FB
2049 PageDesc *p;
2050
2051 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2052 if (!p)
9fa3e853
FB
2053 return 0;
2054 return p->flags;
2055}
2056
376a7909
RH
2057/* Modify the flags of a page and invalidate the code if necessary.
 2058 The flag PAGE_WRITE_ORG is set automatically depending
2059 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2060void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2061{
376a7909
RH
2062 target_ulong addr, len;
2063
2064 /* This function should never be called with addresses outside the
2065 guest address space. If this assert fires, it probably indicates
2066 a missing call to h2g_valid. */
b480d9b7
PB
2067#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2068 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2069#endif
2070 assert(start < end);
9fa3e853
FB
2071
2072 start = start & TARGET_PAGE_MASK;
2073 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2074
2075 if (flags & PAGE_WRITE) {
9fa3e853 2076 flags |= PAGE_WRITE_ORG;
376a7909
RH
2077 }
2078
2079 for (addr = start, len = end - start;
2080 len != 0;
2081 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2082 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2083
2084 /* If the write protection bit is set, then we invalidate
2085 the code inside. */
5fafdf24 2086 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2087 (flags & PAGE_WRITE) &&
2088 p->first_tb) {
d720b93d 2089 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2090 }
2091 p->flags = flags;
2092 }
33417e70
FB
2093}
2094
3d97b40b
TS
2095int page_check_range(target_ulong start, target_ulong len, int flags)
2096{
2097 PageDesc *p;
2098 target_ulong end;
2099 target_ulong addr;
2100
376a7909
RH
2101 /* This function should never be called with addresses outside the
2102 guest address space. If this assert fires, it probably indicates
2103 a missing call to h2g_valid. */
338e9e6c
BS
2104#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2105 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2106#endif
2107
3e0650a9
RH
2108 if (len == 0) {
2109 return 0;
2110 }
376a7909
RH
2111 if (start + len - 1 < start) {
2112 /* We've wrapped around. */
55f280c9 2113 return -1;
376a7909 2114 }
55f280c9 2115
3d97b40b
TS
 2116 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2117 start = start & TARGET_PAGE_MASK;
2118
376a7909
RH
2119 for (addr = start, len = end - start;
2120 len != 0;
2121 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2122 p = page_find(addr >> TARGET_PAGE_BITS);
 2123 if (!p)
 2124 return -1;
 2125 if (!(p->flags & PAGE_VALID))
 2126 return -1;
2127
dae3270c 2128 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2129 return -1;
dae3270c
FB
2130 if (flags & PAGE_WRITE) {
2131 if (!(p->flags & PAGE_WRITE_ORG))
2132 return -1;
2133 /* unprotect the page if it was put read-only because it
2134 contains translated code */
2135 if (!(p->flags & PAGE_WRITE)) {
2136 if (!page_unprotect(addr, 0, NULL))
2137 return -1;
2138 }
2139 return 0;
2140 }
3d97b40b
TS
2141 }
2142 return 0;
2143}
2144
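/* Illustrative sketch, not part of exec.c: a hypothetical user-mode syscall
 * helper validating a guest buffer before touching it.  page_check_range()
 * returns 0 only if every page in the range is valid and carries the
 * requested protection bits. */
static int example_validate_guest_buffer(target_ulong guest_addr,
                                         target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
        return -1; /* the caller would report EFAULT to the guest */
    }
    return 0;
}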
9fa3e853 2145/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2146 page. Return TRUE if the fault was successfully handled. */
6375e09e 2147int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2148{
45d679d6
AJ
2149 unsigned int prot;
2150 PageDesc *p;
53a5960a 2151 target_ulong host_start, host_end, addr;
9fa3e853 2152
c8a706fe
PB
2153 /* Technically this isn't safe inside a signal handler. However we
2154 know this only ever happens in a synchronous SEGV handler, so in
2155 practice it seems to be ok. */
2156 mmap_lock();
2157
45d679d6
AJ
2158 p = page_find(address >> TARGET_PAGE_BITS);
2159 if (!p) {
c8a706fe 2160 mmap_unlock();
9fa3e853 2161 return 0;
c8a706fe 2162 }
45d679d6 2163
9fa3e853
FB
2164 /* if the page was really writable, then we change its
2165 protection back to writable */
45d679d6
AJ
2166 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2167 host_start = address & qemu_host_page_mask;
2168 host_end = host_start + qemu_host_page_size;
2169
2170 prot = 0;
2171 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2172 p = page_find(addr >> TARGET_PAGE_BITS);
2173 p->flags |= PAGE_WRITE;
2174 prot |= p->flags;
2175
9fa3e853
FB
2176 /* and since the content will be modified, we must invalidate
2177 the corresponding translated code. */
45d679d6 2178 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2179#ifdef DEBUG_TB_CHECK
45d679d6 2180 tb_invalidate_check(addr);
9fa3e853 2181#endif
9fa3e853 2182 }
45d679d6
AJ
2183 mprotect((void *)g2h(host_start), qemu_host_page_size,
2184 prot & PAGE_BITS);
2185
2186 mmap_unlock();
2187 return 1;
9fa3e853 2188 }
c8a706fe 2189 mmap_unlock();
9fa3e853
FB
2190 return 0;
2191}
9fa3e853
FB
2192#endif /* defined(CONFIG_USER_ONLY) */
2193
e2eef170 2194#if !defined(CONFIG_USER_ONLY)
8da3ff18 2195
c04b2b78
PB
2196#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2197typedef struct subpage_t {
70c68e44 2198 MemoryRegion iomem;
a8170e5e 2199 hwaddr base;
5312bd8b 2200 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2201} subpage_t;
2202
c227f099 2203static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2204 uint16_t section);
a8170e5e 2205static subpage_t *subpage_init(hwaddr base);
5312bd8b 2206static void destroy_page_desc(uint16_t section_index)
54688b1e 2207{
5312bd8b
AK
2208 MemoryRegionSection *section = &phys_sections[section_index];
2209 MemoryRegion *mr = section->mr;
54688b1e
AK
2210
2211 if (mr->subpage) {
2212 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2213 memory_region_destroy(&subpage->iomem);
2214 g_free(subpage);
2215 }
2216}
2217
4346ae3e 2218static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2219{
2220 unsigned i;
d6f2ea22 2221 PhysPageEntry *p;
54688b1e 2222
c19e8800 2223 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2224 return;
2225 }
2226
c19e8800 2227 p = phys_map_nodes[lp->ptr];
4346ae3e 2228 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2229 if (!p[i].is_leaf) {
54688b1e 2230 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2231 } else {
c19e8800 2232 destroy_page_desc(p[i].ptr);
54688b1e 2233 }
54688b1e 2234 }
07f07b31 2235 lp->is_leaf = 0;
c19e8800 2236 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2237}
2238
ac1970fb 2239static void destroy_all_mappings(AddressSpaceDispatch *d)
54688b1e 2240{
ac1970fb 2241 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
d6f2ea22 2242 phys_map_nodes_reset();
54688b1e
AK
2243}
2244
5312bd8b
AK
2245static uint16_t phys_section_add(MemoryRegionSection *section)
2246{
2247 if (phys_sections_nb == phys_sections_nb_alloc) {
2248 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2249 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2250 phys_sections_nb_alloc);
2251 }
2252 phys_sections[phys_sections_nb] = *section;
2253 return phys_sections_nb++;
2254}
2255
2256static void phys_sections_clear(void)
2257{
2258 phys_sections_nb = 0;
2259}
2260
ac1970fb 2261static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
0f0cb164
AK
2262{
2263 subpage_t *subpage;
a8170e5e 2264 hwaddr base = section->offset_within_address_space
0f0cb164 2265 & TARGET_PAGE_MASK;
ac1970fb 2266 MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
0f0cb164
AK
2267 MemoryRegionSection subsection = {
2268 .offset_within_address_space = base,
2269 .size = TARGET_PAGE_SIZE,
2270 };
a8170e5e 2271 hwaddr start, end;
0f0cb164 2272
f3705d53 2273 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2274
f3705d53 2275 if (!(existing->mr->subpage)) {
0f0cb164
AK
2276 subpage = subpage_init(base);
2277 subsection.mr = &subpage->iomem;
ac1970fb 2278 phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
2999097b 2279 phys_section_add(&subsection));
0f0cb164 2280 } else {
f3705d53 2281 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2282 }
2283 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
adb2a9b5 2284 end = start + section->size - 1;
0f0cb164
AK
2285 subpage_register(subpage, start, end, phys_section_add(section));
2286}
2287
2288
ac1970fb 2289static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
33417e70 2290{
a8170e5e 2291 hwaddr start_addr = section->offset_within_address_space;
dd81124b 2292 ram_addr_t size = section->size;
a8170e5e 2293 hwaddr addr;
5312bd8b 2294 uint16_t section_index = phys_section_add(section);
dd81124b 2295
3b8e6a2d 2296 assert(size);
f6f3fbca 2297
3b8e6a2d 2298 addr = start_addr;
ac1970fb 2299 phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2999097b 2300 section_index);
33417e70
FB
2301}
2302
ac1970fb 2303static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
0f0cb164 2304{
ac1970fb 2305 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
0f0cb164
AK
2306 MemoryRegionSection now = *section, remain = *section;
2307
2308 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2309 || (now.size < TARGET_PAGE_SIZE)) {
2310 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2311 - now.offset_within_address_space,
2312 now.size);
ac1970fb 2313 register_subpage(d, &now);
0f0cb164
AK
2314 remain.size -= now.size;
2315 remain.offset_within_address_space += now.size;
2316 remain.offset_within_region += now.size;
2317 }
69b67646
TH
2318 while (remain.size >= TARGET_PAGE_SIZE) {
2319 now = remain;
2320 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2321 now.size = TARGET_PAGE_SIZE;
ac1970fb 2322 register_subpage(d, &now);
69b67646
TH
2323 } else {
2324 now.size &= TARGET_PAGE_MASK;
ac1970fb 2325 register_multipage(d, &now);
69b67646 2326 }
0f0cb164
AK
2327 remain.size -= now.size;
2328 remain.offset_within_address_space += now.size;
2329 remain.offset_within_region += now.size;
2330 }
2331 now = remain;
2332 if (now.size) {
ac1970fb 2333 register_subpage(d, &now);
0f0cb164
AK
2334 }
2335}
2336
62a2744c
SY
2337void qemu_flush_coalesced_mmio_buffer(void)
2338{
2339 if (kvm_enabled())
2340 kvm_flush_coalesced_mmio_buffer();
2341}
2342
c902760f
MT
2343#if defined(__linux__) && !defined(TARGET_S390X)
2344
2345#include <sys/vfs.h>
2346
2347#define HUGETLBFS_MAGIC 0x958458f6
2348
2349static long gethugepagesize(const char *path)
2350{
2351 struct statfs fs;
2352 int ret;
2353
2354 do {
9742bf26 2355 ret = statfs(path, &fs);
c902760f
MT
2356 } while (ret != 0 && errno == EINTR);
2357
2358 if (ret != 0) {
9742bf26
YT
2359 perror(path);
2360 return 0;
c902760f
MT
2361 }
2362
2363 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2364 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2365
2366 return fs.f_bsize;
2367}
2368
04b16653
AW
2369static void *file_ram_alloc(RAMBlock *block,
2370 ram_addr_t memory,
2371 const char *path)
c902760f
MT
2372{
2373 char *filename;
2374 void *area;
2375 int fd;
2376#ifdef MAP_POPULATE
2377 int flags;
2378#endif
2379 unsigned long hpagesize;
2380
2381 hpagesize = gethugepagesize(path);
2382 if (!hpagesize) {
9742bf26 2383 return NULL;
c902760f
MT
2384 }
2385
2386 if (memory < hpagesize) {
2387 return NULL;
2388 }
2389
2390 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2391 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2392 return NULL;
2393 }
2394
2395 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2396 return NULL;
c902760f
MT
2397 }
2398
2399 fd = mkstemp(filename);
2400 if (fd < 0) {
9742bf26
YT
2401 perror("unable to create backing store for hugepages");
2402 free(filename);
2403 return NULL;
c902760f
MT
2404 }
2405 unlink(filename);
2406 free(filename);
2407
2408 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2409
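 /* Worked example for the rounding above (not part of exec.c): with 2 MiB
 * huge pages, hpagesize - 1 == 0x1fffff, so a 3 MiB (0x300000) request
 * becomes (0x300000 + 0x1fffff) & ~0x1fffff == 0x400000, i.e. the mapping
 * is padded up to two whole huge pages. */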
2410 /*
2411 * ftruncate is not supported by hugetlbfs in older
2412 * hosts, so don't bother bailing out on errors.
2413 * If anything goes wrong with it under other filesystems,
2414 * mmap will fail.
2415 */
2416 if (ftruncate(fd, memory))
9742bf26 2417 perror("ftruncate");
c902760f
MT
2418
2419#ifdef MAP_POPULATE
 2420 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
 2421 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2422 * to sidestep this quirk.
2423 */
2424 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2425 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2426#else
2427 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2428#endif
2429 if (area == MAP_FAILED) {
9742bf26
YT
2430 perror("file_ram_alloc: can't mmap RAM pages");
2431 close(fd);
2432 return (NULL);
c902760f 2433 }
04b16653 2434 block->fd = fd;
c902760f
MT
2435 return area;
2436}
2437#endif
2438
d17b5288 2439static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2440{
2441 RAMBlock *block, *next_block;
3e837b2c 2442 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2443
2444 if (QLIST_EMPTY(&ram_list.blocks))
2445 return 0;
2446
2447 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2448 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2449
2450 end = block->offset + block->length;
2451
2452 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2453 if (next_block->offset >= end) {
2454 next = MIN(next, next_block->offset);
2455 }
2456 }
2457 if (next - end >= size && next - end < mingap) {
3e837b2c 2458 offset = end;
04b16653
AW
2459 mingap = next - end;
2460 }
2461 }
3e837b2c
AW
2462
2463 if (offset == RAM_ADDR_MAX) {
2464 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2465 (uint64_t)size);
2466 abort();
2467 }
2468
04b16653
AW
2469 return offset;
2470}
2471
652d7ec2 2472ram_addr_t last_ram_offset(void)
d17b5288
AW
2473{
2474 RAMBlock *block;
2475 ram_addr_t last = 0;
2476
2477 QLIST_FOREACH(block, &ram_list.blocks, next)
2478 last = MAX(last, block->offset + block->length);
2479
2480 return last;
2481}
2482
ddb97f1d
JB
2483static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2484{
2485 int ret;
2486 QemuOpts *machine_opts;
2487
 2488 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2489 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2490 if (machine_opts &&
2491 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2492 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2493 if (ret) {
2494 perror("qemu_madvise");
2495 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2496 "but dump_guest_core=off specified\n");
2497 }
2498 }
2499}
2500
c5705a77 2501void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2502{
2503 RAMBlock *new_block, *block;
2504
c5705a77
AK
2505 new_block = NULL;
2506 QLIST_FOREACH(block, &ram_list.blocks, next) {
2507 if (block->offset == addr) {
2508 new_block = block;
2509 break;
2510 }
2511 }
2512 assert(new_block);
2513 assert(!new_block->idstr[0]);
84b89d78 2514
09e5ab63
AL
2515 if (dev) {
2516 char *id = qdev_get_dev_path(dev);
84b89d78
CM
2517 if (id) {
2518 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2519 g_free(id);
84b89d78
CM
2520 }
2521 }
2522 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2523
2524 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2525 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2526 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2527 new_block->idstr);
2528 abort();
2529 }
2530 }
c5705a77
AK
2531}
2532
8490fc78
LC
2533static int memory_try_enable_merging(void *addr, size_t len)
2534{
2535 QemuOpts *opts;
2536
2537 opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2538 if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
2539 /* disabled by the user */
2540 return 0;
2541 }
2542
2543 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
2544}
2545
c5705a77
AK
2546ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2547 MemoryRegion *mr)
2548{
2549 RAMBlock *new_block;
2550
2551 size = TARGET_PAGE_ALIGN(size);
2552 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2553
7c637366 2554 new_block->mr = mr;
432d268c 2555 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2556 if (host) {
2557 new_block->host = host;
cd19cfa2 2558 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2559 } else {
2560 if (mem_path) {
c902760f 2561#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2562 new_block->host = file_ram_alloc(new_block, size, mem_path);
2563 if (!new_block->host) {
2564 new_block->host = qemu_vmalloc(size);
8490fc78 2565 memory_try_enable_merging(new_block->host, size);
6977dfe6 2566 }
c902760f 2567#else
6977dfe6
YT
2568 fprintf(stderr, "-mem-path option unsupported\n");
2569 exit(1);
c902760f 2570#endif
6977dfe6 2571 } else {
868bb33f 2572 if (xen_enabled()) {
fce537d4 2573 xen_ram_alloc(new_block->offset, size, mr);
fdec9918
CB
2574 } else if (kvm_enabled()) {
2575 /* some s390/kvm configurations have special constraints */
2576 new_block->host = kvm_vmalloc(size);
432d268c
JN
2577 } else {
2578 new_block->host = qemu_vmalloc(size);
2579 }
8490fc78 2580 memory_try_enable_merging(new_block->host, size);
6977dfe6 2581 }
c902760f 2582 }
94a6b54f
PB
2583 new_block->length = size;
2584
f471a17e 2585 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2586
7267c094 2587 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2588 last_ram_offset() >> TARGET_PAGE_BITS);
5fda043f
IM
2589 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2590 0, size >> TARGET_PAGE_BITS);
1720aeee 2591 cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
94a6b54f 2592
ddb97f1d 2593 qemu_ram_setup_dump(new_block->host, size);
ad0b5321 2594 qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);
ddb97f1d 2595
6f0437e8
JK
2596 if (kvm_enabled())
2597 kvm_setup_guest_memory(new_block->host, size);
2598
94a6b54f
PB
2599 return new_block->offset;
2600}
e9a1ab19 2601
c5705a77 2602ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2603{
c5705a77 2604 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2605}
2606
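/* Illustrative sketch, not part of exec.c: the life cycle of a RAM block as
 * a hypothetical device model might drive it directly with the helpers in
 * this file.  In practice this is usually hidden behind the MemoryRegion
 * RAM initializers, which end up calling qemu_ram_alloc() on the device's
 * behalf; the 64 KiB size and "example-vram" name are made up. */
static void example_ram_block_lifecycle(MemoryRegion *mr)
{
    ram_addr_t offset;
    uint8_t *host;

    offset = qemu_ram_alloc(64 * 1024, mr);      /* rounded to page size */
    qemu_ram_set_idstr(offset, "example-vram", NULL);

    host = qemu_get_ram_ptr(offset);             /* device-local access only */
    memset(host, 0, 64 * 1024);

    qemu_ram_free(offset);                       /* tear the block down again */
}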
1f2e98b6
AW
2607void qemu_ram_free_from_ptr(ram_addr_t addr)
2608{
2609 RAMBlock *block;
2610
2611 QLIST_FOREACH(block, &ram_list.blocks, next) {
2612 if (addr == block->offset) {
2613 QLIST_REMOVE(block, next);
7267c094 2614 g_free(block);
1f2e98b6
AW
2615 return;
2616 }
2617 }
2618}
2619
c227f099 2620void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2621{
04b16653
AW
2622 RAMBlock *block;
2623
2624 QLIST_FOREACH(block, &ram_list.blocks, next) {
2625 if (addr == block->offset) {
2626 QLIST_REMOVE(block, next);
cd19cfa2
HY
2627 if (block->flags & RAM_PREALLOC_MASK) {
2628 ;
2629 } else if (mem_path) {
04b16653
AW
2630#if defined (__linux__) && !defined(TARGET_S390X)
2631 if (block->fd) {
2632 munmap(block->host, block->length);
2633 close(block->fd);
2634 } else {
2635 qemu_vfree(block->host);
2636 }
fd28aa13
JK
2637#else
2638 abort();
04b16653
AW
2639#endif
2640 } else {
2641#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2642 munmap(block->host, block->length);
2643#else
868bb33f 2644 if (xen_enabled()) {
e41d7c69 2645 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2646 } else {
2647 qemu_vfree(block->host);
2648 }
04b16653
AW
2649#endif
2650 }
7267c094 2651 g_free(block);
04b16653
AW
2652 return;
2653 }
2654 }
2655
e9a1ab19
FB
2656}
2657
cd19cfa2
HY
2658#ifndef _WIN32
2659void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2660{
2661 RAMBlock *block;
2662 ram_addr_t offset;
2663 int flags;
2664 void *area, *vaddr;
2665
2666 QLIST_FOREACH(block, &ram_list.blocks, next) {
2667 offset = addr - block->offset;
2668 if (offset < block->length) {
2669 vaddr = block->host + offset;
2670 if (block->flags & RAM_PREALLOC_MASK) {
2671 ;
2672 } else {
2673 flags = MAP_FIXED;
2674 munmap(vaddr, length);
2675 if (mem_path) {
2676#if defined(__linux__) && !defined(TARGET_S390X)
2677 if (block->fd) {
2678#ifdef MAP_POPULATE
2679 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2680 MAP_PRIVATE;
2681#else
2682 flags |= MAP_PRIVATE;
2683#endif
2684 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2685 flags, block->fd, offset);
2686 } else {
2687 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2688 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2689 flags, -1, 0);
2690 }
fd28aa13
JK
2691#else
2692 abort();
cd19cfa2
HY
2693#endif
2694 } else {
2695#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2696 flags |= MAP_SHARED | MAP_ANONYMOUS;
2697 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2698 flags, -1, 0);
2699#else
2700 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2701 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2702 flags, -1, 0);
2703#endif
2704 }
2705 if (area != vaddr) {
f15fbc4b
AP
2706 fprintf(stderr, "Could not remap addr: "
2707 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2708 length, addr);
2709 exit(1);
2710 }
8490fc78 2711 memory_try_enable_merging(vaddr, length);
ddb97f1d 2712 qemu_ram_setup_dump(vaddr, length);
cd19cfa2
HY
2713 }
2714 return;
2715 }
2716 }
2717}
2718#endif /* !_WIN32 */
2719
dc828ca1 2720/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2721 With the exception of the softmmu code in this file, this should
2722 only be used for local memory (e.g. video ram) that the device owns,
2723 and knows it isn't going to access beyond the end of the block.
2724
2725 It should not be used for general purpose DMA.
2726 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2727 */
c227f099 2728void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2729{
94a6b54f
PB
2730 RAMBlock *block;
2731
f471a17e
AW
2732 QLIST_FOREACH(block, &ram_list.blocks, next) {
2733 if (addr - block->offset < block->length) {
7d82af38
VP
 2734 /* Move this entry to the start of the list. */
2735 if (block != QLIST_FIRST(&ram_list.blocks)) {
2736 QLIST_REMOVE(block, next);
2737 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2738 }
868bb33f 2739 if (xen_enabled()) {
432d268c
JN
2740 /* We need to check if the requested address is in the RAM
2741 * because we don't want to map the entire memory in QEMU.
712c2b41 2742 * In that case just map until the end of the page.
432d268c
JN
2743 */
2744 if (block->offset == 0) {
e41d7c69 2745 return xen_map_cache(addr, 0, 0);
432d268c 2746 } else if (block->host == NULL) {
e41d7c69
JK
2747 block->host =
2748 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2749 }
2750 }
f471a17e
AW
2751 return block->host + (addr - block->offset);
2752 }
94a6b54f 2753 }
f471a17e
AW
2754
2755 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2756 abort();
2757
2758 return NULL;
dc828ca1
PB
2759}
2760
b2e0a138
MT
2761/* Return a host pointer to ram allocated with qemu_ram_alloc.
2762 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2763 */
8b9c99d9 2764static void *qemu_safe_ram_ptr(ram_addr_t addr)
b2e0a138
MT
2765{
2766 RAMBlock *block;
2767
2768 QLIST_FOREACH(block, &ram_list.blocks, next) {
2769 if (addr - block->offset < block->length) {
868bb33f 2770 if (xen_enabled()) {
432d268c
JN
2771 /* We need to check if the requested address is in the RAM
2772 * because we don't want to map the entire memory in QEMU.
712c2b41 2773 * In that case just map until the end of the page.
432d268c
JN
2774 */
2775 if (block->offset == 0) {
e41d7c69 2776 return xen_map_cache(addr, 0, 0);
432d268c 2777 } else if (block->host == NULL) {
e41d7c69
JK
2778 block->host =
2779 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2780 }
2781 }
b2e0a138
MT
2782 return block->host + (addr - block->offset);
2783 }
2784 }
2785
2786 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2787 abort();
2788
2789 return NULL;
2790}
2791
38bee5dc
SS
2792/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2793 * but takes a size argument */
8b9c99d9 2794static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 2795{
8ab934f9
SS
2796 if (*size == 0) {
2797 return NULL;
2798 }
868bb33f 2799 if (xen_enabled()) {
e41d7c69 2800 return xen_map_cache(addr, *size, 1);
868bb33f 2801 } else {
38bee5dc
SS
2802 RAMBlock *block;
2803
2804 QLIST_FOREACH(block, &ram_list.blocks, next) {
2805 if (addr - block->offset < block->length) {
2806 if (addr - block->offset + *size > block->length)
2807 *size = block->length - addr + block->offset;
2808 return block->host + (addr - block->offset);
2809 }
2810 }
2811
2812 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2813 abort();
38bee5dc
SS
2814 }
2815}
2816
050a0ddf
AP
2817void qemu_put_ram_ptr(void *addr)
2818{
2819 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
2820}
2821
e890261f 2822int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 2823{
94a6b54f
PB
2824 RAMBlock *block;
2825 uint8_t *host = ptr;
2826
868bb33f 2827 if (xen_enabled()) {
e41d7c69 2828 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
2829 return 0;
2830 }
2831
f471a17e 2832 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 2833 /* This case occurs when the block is not mapped. */
2834 if (block->host == NULL) {
2835 continue;
2836 }
f471a17e 2837 if (host - block->host < block->length) {
e890261f
MT
2838 *ram_addr = block->offset + (host - block->host);
2839 return 0;
f471a17e 2840 }
94a6b54f 2841 }
432d268c 2842
e890261f
MT
2843 return -1;
2844}
f471a17e 2845
e890261f
MT
2846/* Some of the softmmu routines need to translate from a host pointer
2847 (typically a TLB entry) back to a ram offset. */
2848ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2849{
2850 ram_addr_t ram_addr;
f471a17e 2851
e890261f
MT
2852 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2853 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2854 abort();
2855 }
2856 return ram_addr;
5579c7f3
PB
2857}
2858
a8170e5e 2859static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
0e0df1e2 2860 unsigned size)
e18231a3
BS
2861{
2862#ifdef DEBUG_UNASSIGNED
2863 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2864#endif
5b450407 2865#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2866 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
2867#endif
2868 return 0;
2869}
2870
a8170e5e 2871static void unassigned_mem_write(void *opaque, hwaddr addr,
0e0df1e2 2872 uint64_t val, unsigned size)
e18231a3
BS
2873{
2874#ifdef DEBUG_UNASSIGNED
0e0df1e2 2875 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 2876#endif
5b450407 2877#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 2878 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 2879#endif
33417e70
FB
2880}
2881
0e0df1e2
AK
2882static const MemoryRegionOps unassigned_mem_ops = {
2883 .read = unassigned_mem_read,
2884 .write = unassigned_mem_write,
2885 .endianness = DEVICE_NATIVE_ENDIAN,
2886};
e18231a3 2887
a8170e5e 2888static uint64_t error_mem_read(void *opaque, hwaddr addr,
0e0df1e2 2889 unsigned size)
e18231a3 2890{
0e0df1e2 2891 abort();
e18231a3
BS
2892}
2893
a8170e5e 2894static void error_mem_write(void *opaque, hwaddr addr,
0e0df1e2 2895 uint64_t value, unsigned size)
e18231a3 2896{
0e0df1e2 2897 abort();
33417e70
FB
2898}
2899
0e0df1e2
AK
2900static const MemoryRegionOps error_mem_ops = {
2901 .read = error_mem_read,
2902 .write = error_mem_write,
2903 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2904};
2905
0e0df1e2
AK
2906static const MemoryRegionOps rom_mem_ops = {
2907 .read = error_mem_read,
2908 .write = unassigned_mem_write,
2909 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
2910};
2911
a8170e5e 2912static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
0e0df1e2 2913 uint64_t val, unsigned size)
9fa3e853 2914{
3a7d929e 2915 int dirty_flags;
f7c11b53 2916 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2917 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2918#if !defined(CONFIG_USER_ONLY)
0e0df1e2 2919 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 2920 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2921#endif
3a7d929e 2922 }
0e0df1e2
AK
2923 switch (size) {
2924 case 1:
2925 stb_p(qemu_get_ram_ptr(ram_addr), val);
2926 break;
2927 case 2:
2928 stw_p(qemu_get_ram_ptr(ram_addr), val);
2929 break;
2930 case 4:
2931 stl_p(qemu_get_ram_ptr(ram_addr), val);
2932 break;
2933 default:
2934 abort();
3a7d929e 2935 }
f23db169 2936 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2937 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2938 /* we remove the notdirty callback only if the code has been
2939 flushed */
2940 if (dirty_flags == 0xff)
2e70f6ef 2941 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2942}
2943
0e0df1e2
AK
2944static const MemoryRegionOps notdirty_mem_ops = {
2945 .read = error_mem_read,
2946 .write = notdirty_mem_write,
2947 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
2948};
2949
0f459d16 2950/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2951static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 2952{
9349b4f9 2953 CPUArchState *env = cpu_single_env;
06d55cc1
AL
2954 target_ulong pc, cs_base;
2955 TranslationBlock *tb;
0f459d16 2956 target_ulong vaddr;
a1d1bb31 2957 CPUWatchpoint *wp;
06d55cc1 2958 int cpu_flags;
0f459d16 2959
06d55cc1
AL
2960 if (env->watchpoint_hit) {
2961 /* We re-entered the check after replacing the TB. Now raise
 2962 * the debug interrupt so that it will trigger after the
2963 * current instruction. */
2964 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2965 return;
2966 }
2e70f6ef 2967 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2968 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2969 if ((vaddr == (wp->vaddr & len_mask) ||
2970 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2971 wp->flags |= BP_WATCHPOINT_HIT;
2972 if (!env->watchpoint_hit) {
2973 env->watchpoint_hit = wp;
2974 tb = tb_find_pc(env->mem_io_pc);
2975 if (!tb) {
2976 cpu_abort(env, "check_watchpoint: could not find TB for "
2977 "pc=%p", (void *)env->mem_io_pc);
2978 }
618ba8e6 2979 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
2980 tb_phys_invalidate(tb, -1);
2981 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2982 env->exception_index = EXCP_DEBUG;
488d6577 2983 cpu_loop_exit(env);
6e140f28
AL
2984 } else {
2985 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2986 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 2987 cpu_resume_from_signal(env, NULL);
6e140f28 2988 }
06d55cc1 2989 }
6e140f28
AL
2990 } else {
2991 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2992 }
2993 }
2994}
2995
6658ffb8
PB
2996/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2997 so these check for a hit then pass through to the normal out-of-line
2998 phys routines. */
a8170e5e 2999static uint64_t watch_mem_read(void *opaque, hwaddr addr,
1ec9b909 3000 unsigned size)
6658ffb8 3001{
1ec9b909
AK
3002 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3003 switch (size) {
3004 case 1: return ldub_phys(addr);
3005 case 2: return lduw_phys(addr);
3006 case 4: return ldl_phys(addr);
3007 default: abort();
3008 }
6658ffb8
PB
3009}
3010
a8170e5e 3011static void watch_mem_write(void *opaque, hwaddr addr,
1ec9b909 3012 uint64_t val, unsigned size)
6658ffb8 3013{
1ec9b909
AK
3014 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3015 switch (size) {
67364150
MF
3016 case 1:
3017 stb_phys(addr, val);
3018 break;
3019 case 2:
3020 stw_phys(addr, val);
3021 break;
3022 case 4:
3023 stl_phys(addr, val);
3024 break;
1ec9b909
AK
3025 default: abort();
3026 }
6658ffb8
PB
3027}
3028
1ec9b909
AK
3029static const MemoryRegionOps watch_mem_ops = {
3030 .read = watch_mem_read,
3031 .write = watch_mem_write,
3032 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3033};
6658ffb8 3034
a8170e5e 3035static uint64_t subpage_read(void *opaque, hwaddr addr,
70c68e44 3036 unsigned len)
db7b5426 3037{
70c68e44 3038 subpage_t *mmio = opaque;
f6405247 3039 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3040 MemoryRegionSection *section;
db7b5426
BS
3041#if defined(DEBUG_SUBPAGE)
3042 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3043 mmio, len, addr, idx);
3044#endif
db7b5426 3045
5312bd8b
AK
3046 section = &phys_sections[mmio->sub_section[idx]];
3047 addr += mmio->base;
3048 addr -= section->offset_within_address_space;
3049 addr += section->offset_within_region;
37ec01d4 3050 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3051}
3052
a8170e5e 3053static void subpage_write(void *opaque, hwaddr addr,
70c68e44 3054 uint64_t value, unsigned len)
db7b5426 3055{
70c68e44 3056 subpage_t *mmio = opaque;
f6405247 3057 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3058 MemoryRegionSection *section;
db7b5426 3059#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3060 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3061 " idx %d value %"PRIx64"\n",
f6405247 3062 __func__, mmio, len, addr, idx, value);
db7b5426 3063#endif
f6405247 3064
5312bd8b
AK
3065 section = &phys_sections[mmio->sub_section[idx]];
3066 addr += mmio->base;
3067 addr -= section->offset_within_address_space;
3068 addr += section->offset_within_region;
37ec01d4 3069 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3070}
3071
70c68e44
AK
3072static const MemoryRegionOps subpage_ops = {
3073 .read = subpage_read,
3074 .write = subpage_write,
3075 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3076};
3077
a8170e5e 3078static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
de712f94 3079 unsigned size)
56384e8b
AF
3080{
3081 ram_addr_t raddr = addr;
3082 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3083 switch (size) {
3084 case 1: return ldub_p(ptr);
3085 case 2: return lduw_p(ptr);
3086 case 4: return ldl_p(ptr);
3087 default: abort();
3088 }
56384e8b
AF
3089}
3090
a8170e5e 3091static void subpage_ram_write(void *opaque, hwaddr addr,
de712f94 3092 uint64_t value, unsigned size)
56384e8b
AF
3093{
3094 ram_addr_t raddr = addr;
3095 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3096 switch (size) {
3097 case 1: return stb_p(ptr, value);
3098 case 2: return stw_p(ptr, value);
3099 case 4: return stl_p(ptr, value);
3100 default: abort();
3101 }
56384e8b
AF
3102}
3103
de712f94
AK
3104static const MemoryRegionOps subpage_ram_ops = {
3105 .read = subpage_ram_read,
3106 .write = subpage_ram_write,
3107 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3108};
3109
c227f099 3110static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3111 uint16_t section)
db7b5426
BS
3112{
3113 int idx, eidx;
3114
3115 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3116 return -1;
3117 idx = SUBPAGE_IDX(start);
3118 eidx = SUBPAGE_IDX(end);
3119#if defined(DEBUG_SUBPAGE)
0bf9e31a 3120 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 3121 mmio, start, end, idx, eidx, section);
3122#endif
5312bd8b
AK
3123 if (memory_region_is_ram(phys_sections[section].mr)) {
3124 MemoryRegionSection new_section = phys_sections[section];
3125 new_section.mr = &io_mem_subpage_ram;
3126 section = phys_section_add(&new_section);
56384e8b 3127 }
db7b5426 3128 for (; idx <= eidx; idx++) {
5312bd8b 3129 mmio->sub_section[idx] = section;
db7b5426
BS
3130 }
3131
3132 return 0;
3133}
3134
a8170e5e 3135static subpage_t *subpage_init(hwaddr base)
db7b5426 3136{
c227f099 3137 subpage_t *mmio;
db7b5426 3138
7267c094 3139 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3140
3141 mmio->base = base;
70c68e44
AK
3142 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3143 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3144 mmio->iomem.subpage = true;
db7b5426 3145#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 3146 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3147 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3148#endif
0f0cb164 3149 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3150
3151 return mmio;
3152}
3153
5312bd8b
AK
3154static uint16_t dummy_section(MemoryRegion *mr)
3155{
3156 MemoryRegionSection section = {
3157 .mr = mr,
3158 .offset_within_address_space = 0,
3159 .offset_within_region = 0,
3160 .size = UINT64_MAX,
3161 };
3162
3163 return phys_section_add(&section);
3164}
3165
a8170e5e 3166MemoryRegion *iotlb_to_region(hwaddr index)
aa102231 3167{
37ec01d4 3168 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3169}
3170
e9179ce1
AK
3171static void io_mem_init(void)
3172{
0e0df1e2 3173 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3174 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3175 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3176 "unassigned", UINT64_MAX);
3177 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3178 "notdirty", UINT64_MAX);
de712f94
AK
3179 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3180 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3181 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3182 "watch", UINT64_MAX);
e9179ce1
AK
3183}
3184
ac1970fb
AK
3185static void mem_begin(MemoryListener *listener)
3186{
3187 AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
3188
3189 destroy_all_mappings(d);
3190 d->phys_map.ptr = PHYS_MAP_NODE_NIL;
3191}
3192
50c1e149
AK
3193static void core_begin(MemoryListener *listener)
3194{
5312bd8b
AK
3195 phys_sections_clear();
3196 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3197 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3198 phys_section_rom = dummy_section(&io_mem_rom);
3199 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3200}
3201
1d71148e 3202static void tcg_commit(MemoryListener *listener)
50c1e149 3203{
9349b4f9 3204 CPUArchState *env;
117712c3
AK
3205
3206 /* since each CPU stores ram addresses in its TLB cache, we must
3207 reset the modified entries */
3208 /* XXX: slow ! */
3209 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3210 tlb_flush(env, 1);
3211 }
50c1e149
AK
3212}
3213
93632747
AK
3214static void core_log_global_start(MemoryListener *listener)
3215{
3216 cpu_physical_memory_set_dirty_tracking(1);
3217}
3218
3219static void core_log_global_stop(MemoryListener *listener)
3220{
3221 cpu_physical_memory_set_dirty_tracking(0);
3222}
3223
4855d41a
AK
3224static void io_region_add(MemoryListener *listener,
3225 MemoryRegionSection *section)
3226{
a2d33521
AK
3227 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3228
3229 mrio->mr = section->mr;
3230 mrio->offset = section->offset_within_region;
3231 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3232 section->offset_within_address_space, section->size);
a2d33521 3233 ioport_register(&mrio->iorange);
4855d41a
AK
3234}
3235
3236static void io_region_del(MemoryListener *listener,
3237 MemoryRegionSection *section)
3238{
3239 isa_unassign_ioport(section->offset_within_address_space, section->size);
3240}
3241
93632747 3242static MemoryListener core_memory_listener = {
50c1e149 3243 .begin = core_begin,
93632747
AK
3244 .log_global_start = core_log_global_start,
3245 .log_global_stop = core_log_global_stop,
ac1970fb 3246 .priority = 1,
93632747
AK
3247};
3248
4855d41a
AK
3249static MemoryListener io_memory_listener = {
3250 .region_add = io_region_add,
3251 .region_del = io_region_del,
4855d41a
AK
3252 .priority = 0,
3253};
3254
1d71148e
AK
3255static MemoryListener tcg_memory_listener = {
3256 .commit = tcg_commit,
3257};
3258
ac1970fb
AK
3259void address_space_init_dispatch(AddressSpace *as)
3260{
3261 AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);
3262
3263 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
3264 d->listener = (MemoryListener) {
3265 .begin = mem_begin,
3266 .region_add = mem_add,
3267 .region_nop = mem_add,
3268 .priority = 0,
3269 };
3270 as->dispatch = d;
3271 memory_listener_register(&d->listener, as);
3272}
3273
83f3c251
AK
3274void address_space_destroy_dispatch(AddressSpace *as)
3275{
3276 AddressSpaceDispatch *d = as->dispatch;
3277
3278 memory_listener_unregister(&d->listener);
3279 destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
3280 g_free(d);
3281 as->dispatch = NULL;
3282}
3283
62152b8a
AK
3284static void memory_map_init(void)
3285{
7267c094 3286 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3287 memory_region_init(system_memory, "system", INT64_MAX);
2673a5da
AK
3288 address_space_init(&address_space_memory, system_memory);
3289 address_space_memory.name = "memory";
309cb471 3290
7267c094 3291 system_io = g_malloc(sizeof(*system_io));
309cb471 3292 memory_region_init(system_io, "io", 65536);
2673a5da
AK
3293 address_space_init(&address_space_io, system_io);
3294 address_space_io.name = "I/O";
93632747 3295
f6790af6
AK
3296 memory_listener_register(&core_memory_listener, &address_space_memory);
3297 memory_listener_register(&io_memory_listener, &address_space_io);
3298 memory_listener_register(&tcg_memory_listener, &address_space_memory);
9e11908f
PM
3299
3300 dma_context_init(&dma_context_memory, &address_space_memory,
3301 NULL, NULL, NULL);
62152b8a
AK
3302}
3303
3304MemoryRegion *get_system_memory(void)
3305{
3306 return system_memory;
3307}
3308
309cb471
AK
3309MemoryRegion *get_system_io(void)
3310{
3311 return system_io;
3312}
3313
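/* Illustrative sketch, not part of exec.c: how board code typically hangs
 * guest RAM off the system memory region created by memory_map_init().
 * This assumes the memory_region_init_ram()/memory_region_add_subregion()
 * helpers from memory.h; the "example.ram" name and 128 MiB size are made
 * up for the example. */
static void example_board_memory_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(sysmem, 0, ram); /* map at guest address 0 */
}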
e2eef170
PB
3314#endif /* !defined(CONFIG_USER_ONLY) */
3315
13eb76e0
FB
3316/* physical memory access (slow version, mainly for debug) */
3317#if defined(CONFIG_USER_ONLY)
9349b4f9 3318int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3319 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3320{
3321 int l, flags;
3322 target_ulong page;
53a5960a 3323 void * p;
13eb76e0
FB
3324
3325 while (len > 0) {
3326 page = addr & TARGET_PAGE_MASK;
3327 l = (page + TARGET_PAGE_SIZE) - addr;
3328 if (l > len)
3329 l = len;
3330 flags = page_get_flags(page);
3331 if (!(flags & PAGE_VALID))
a68fe89c 3332 return -1;
13eb76e0
FB
3333 if (is_write) {
3334 if (!(flags & PAGE_WRITE))
a68fe89c 3335 return -1;
579a97f7 3336 /* XXX: this code should not depend on lock_user */
72fb7daa 3337 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3338 return -1;
72fb7daa
AJ
3339 memcpy(p, buf, l);
3340 unlock_user(p, addr, l);
13eb76e0
FB
3341 } else {
3342 if (!(flags & PAGE_READ))
a68fe89c 3343 return -1;
579a97f7 3344 /* XXX: this code should not depend on lock_user */
72fb7daa 3345 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3346 return -1;
72fb7daa 3347 memcpy(buf, p, l);
5b257578 3348 unlock_user(p, addr, 0);
13eb76e0
FB
3349 }
3350 len -= l;
3351 buf += l;
3352 addr += l;
3353 }
a68fe89c 3354 return 0;
13eb76e0 3355}
8df1cd07 3356
13eb76e0 3357#else
51d7a9eb 3358
a8170e5e
AK
3359static void invalidate_and_set_dirty(hwaddr addr,
3360 hwaddr length)
51d7a9eb
AP
3361{
3362 if (!cpu_physical_memory_is_dirty(addr)) {
3363 /* invalidate code */
3364 tb_invalidate_phys_page_range(addr, addr + length, 0);
3365 /* set dirty bit */
3366 cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
3367 }
e226939d 3368 xen_modified_memory(addr, length);
51d7a9eb
AP
3369}
3370
a8170e5e 3371void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
ac1970fb 3372 int len, bool is_write)
13eb76e0 3373{
ac1970fb 3374 AddressSpaceDispatch *d = as->dispatch;
37ec01d4 3375 int l;
13eb76e0
FB
3376 uint8_t *ptr;
3377 uint32_t val;
a8170e5e 3378 hwaddr page;
f3705d53 3379 MemoryRegionSection *section;
3b46e624 3380
13eb76e0
FB
3381 while (len > 0) {
3382 page = addr & TARGET_PAGE_MASK;
3383 l = (page + TARGET_PAGE_SIZE) - addr;
3384 if (l > len)
3385 l = len;
ac1970fb 3386 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 3387
13eb76e0 3388 if (is_write) {
f3705d53 3389 if (!memory_region_is_ram(section->mr)) {
a8170e5e 3390 hwaddr addr1;
cc5bea60 3391 addr1 = memory_region_section_addr(section, addr);
6a00d601
FB
3392 /* XXX: could force cpu_single_env to NULL to avoid
3393 potential bugs */
6c2934db 3394 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3395 /* 32 bit write access */
c27004ec 3396 val = ldl_p(buf);
37ec01d4 3397 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3398 l = 4;
6c2934db 3399 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3400 /* 16 bit write access */
c27004ec 3401 val = lduw_p(buf);
37ec01d4 3402 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3403 l = 2;
3404 } else {
1c213d19 3405 /* 8 bit write access */
c27004ec 3406 val = ldub_p(buf);
37ec01d4 3407 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3408 l = 1;
3409 }
f3705d53 3410 } else if (!section->readonly) {
8ca5692d 3411 ram_addr_t addr1;
f3705d53 3412 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3413 + memory_region_section_addr(section, addr);
13eb76e0 3414 /* RAM case */
5579c7f3 3415 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3416 memcpy(ptr, buf, l);
51d7a9eb 3417 invalidate_and_set_dirty(addr1, l);
050a0ddf 3418 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3419 }
3420 } else {
cc5bea60
BS
3421 if (!(memory_region_is_ram(section->mr) ||
3422 memory_region_is_romd(section->mr))) {
a8170e5e 3423 hwaddr addr1;
13eb76e0 3424 /* I/O case */
cc5bea60 3425 addr1 = memory_region_section_addr(section, addr);
6c2934db 3426 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3427 /* 32 bit read access */
37ec01d4 3428 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3429 stl_p(buf, val);
13eb76e0 3430 l = 4;
6c2934db 3431 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3432 /* 16 bit read access */
37ec01d4 3433 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3434 stw_p(buf, val);
13eb76e0
FB
3435 l = 2;
3436 } else {
1c213d19 3437 /* 8 bit read access */
37ec01d4 3438 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3439 stb_p(buf, val);
13eb76e0
FB
3440 l = 1;
3441 }
3442 } else {
3443 /* RAM case */
0a1b357f 3444 ptr = qemu_get_ram_ptr(section->mr->ram_addr
cc5bea60
BS
3445 + memory_region_section_addr(section,
3446 addr));
f3705d53 3447 memcpy(buf, ptr, l);
050a0ddf 3448 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3449 }
3450 }
3451 len -= l;
3452 buf += l;
3453 addr += l;
3454 }
3455}
8df1cd07 3456
a8170e5e 3457void address_space_write(AddressSpace *as, hwaddr addr,
ac1970fb
AK
3458 const uint8_t *buf, int len)
3459{
3460 address_space_rw(as, addr, (uint8_t *)buf, len, true);
3461}
3462
3463/**
3464 * address_space_read: read from an address space.
3465 *
3466 * @as: #AddressSpace to be accessed
3467 * @addr: address within that address space
3468 * @buf: buffer into which the data is transferred
 * @len: length of the data to read, in bytes
3469 */
a8170e5e 3470void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
ac1970fb
AK
3471{
3472 address_space_rw(as, addr, buf, len, false);
3473}
3474
3475
a8170e5e 3476void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
ac1970fb
AK
3477 int len, int is_write)
3478{
3479 return address_space_rw(&address_space_memory, addr, buf, len, is_write);
3480}
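/*
 * Illustrative sketch, not part of exec.c: a device model copying a guest
 * descriptor out of RAM and writing a status byte back through the
 * functions above.  The guest address and descriptor size are invented for
 * the example; only cpu_physical_memory_rw()/address_space_rw() and the
 * global address_space_memory are taken from the code above.
 */
#if 0   /* example only */
static void example_copy_descriptor(hwaddr desc_gpa)
{
    uint8_t desc[16];
    uint8_t status = 1;

    /* read 16 bytes of guest physical memory (is_write = 0) */
    cpu_physical_memory_rw(desc_gpa, desc, sizeof(desc), 0);

    /* ... interpret the descriptor ... */

    /* write one status byte back (is_write = 1) */
    cpu_physical_memory_rw(desc_gpa + 15, &status, 1, 1);

    /* equivalent call against an explicit address space */
    address_space_rw(&address_space_memory, desc_gpa + 15, &status, 1, true);
}
#endif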
3481
d0ecd2aa 3482/* used for ROM loading : can write in RAM and ROM */
a8170e5e 3483void cpu_physical_memory_write_rom(hwaddr addr,
d0ecd2aa
FB
3484 const uint8_t *buf, int len)
3485{
ac1970fb 3486 AddressSpaceDispatch *d = address_space_memory.dispatch;
d0ecd2aa
FB
3487 int l;
3488 uint8_t *ptr;
a8170e5e 3489 hwaddr page;
f3705d53 3490 MemoryRegionSection *section;
3b46e624 3491
d0ecd2aa
FB
3492 while (len > 0) {
3493 page = addr & TARGET_PAGE_MASK;
3494 l = (page + TARGET_PAGE_SIZE) - addr;
3495 if (l > len)
3496 l = len;
ac1970fb 3497 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
3b46e624 3498
cc5bea60
BS
3499 if (!(memory_region_is_ram(section->mr) ||
3500 memory_region_is_romd(section->mr))) {
d0ecd2aa
FB
3501 /* do nothing */
3502 } else {
3503 unsigned long addr1;
f3705d53 3504 addr1 = memory_region_get_ram_addr(section->mr)
cc5bea60 3505 + memory_region_section_addr(section, addr);
d0ecd2aa 3506 /* ROM/RAM case */
5579c7f3 3507 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3508 memcpy(ptr, buf, l);
51d7a9eb 3509 invalidate_and_set_dirty(addr1, l);
050a0ddf 3510 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3511 }
3512 len -= l;
3513 buf += l;
3514 addr += l;
3515 }
3516}
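/*
 * Illustrative sketch, not part of exec.c: a ROM-loader style caller.
 * Unlike cpu_physical_memory_rw(), the function above also copies into RAM
 * that is exposed read-only to the guest, which is why firmware loading
 * goes through it.  The blob and load address are invented.
 */
#if 0   /* example only */
static void example_load_firmware(hwaddr rom_base)
{
    static const uint8_t blob[] = { 0xde, 0xad, 0xbe, 0xef };

    cpu_physical_memory_write_rom(rom_base, blob, sizeof(blob));
}
#endif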
3517
6d16c2f8
AL
3518typedef struct {
3519 void *buffer;
a8170e5e
AK
3520 hwaddr addr;
3521 hwaddr len;
6d16c2f8
AL
3522} BounceBuffer;
3523
3524static BounceBuffer bounce;
3525
ba223c29
AL
3526typedef struct MapClient {
3527 void *opaque;
3528 void (*callback)(void *opaque);
72cf2d4f 3529 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3530} MapClient;
3531
72cf2d4f
BS
3532static QLIST_HEAD(map_client_list, MapClient) map_client_list
3533 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3534
3535void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3536{
7267c094 3537 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3538
3539 client->opaque = opaque;
3540 client->callback = callback;
72cf2d4f 3541 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3542 return client;
3543}
3544
8b9c99d9 3545static void cpu_unregister_map_client(void *_client)
ba223c29
AL
3546{
3547 MapClient *client = (MapClient *)_client;
3548
72cf2d4f 3549 QLIST_REMOVE(client, link);
7267c094 3550 g_free(client);
ba223c29
AL
3551}
3552
3553static void cpu_notify_map_clients(void)
3554{
3555 MapClient *client;
3556
72cf2d4f
BS
3557 while (!QLIST_EMPTY(&map_client_list)) {
3558 client = QLIST_FIRST(&map_client_list);
ba223c29 3559 client->callback(client->opaque);
34d5e948 3560 cpu_unregister_map_client(client);
ba223c29
AL
3561 }
3562}
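/*
 * Illustrative sketch, not part of exec.c: a caller that could not map
 * guest memory (e.g. the single bounce buffer was busy) registers a map
 * client and is called back from cpu_notify_map_clients() once a mapping
 * has been released; the client is unregistered right after the callback
 * returns.  The retry helper below is hypothetical.
 */
#if 0   /* example only */
/* hypothetical device-specific retry hook, not a real QEMU function */
extern void example_restart_pending_transfer(void *opaque);

static void example_map_retry_cb(void *opaque)
{
    example_restart_pending_transfer(opaque);
}

static void example_wait_for_bounce(void *dev_state)
{
    /* ask to be notified when retrying the map is likely to succeed */
    cpu_register_map_client(dev_state, example_map_retry_cb);
}
#endif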
3563
6d16c2f8
AL
3564/* Map a physical memory region into a host virtual address.
3565 * May map a subset of the requested range, given by and returned in *plen.
3566 * May return NULL if resources needed to perform the mapping are exhausted.
3567 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3568 * Use cpu_register_map_client() to know when retrying the map operation is
3569 * likely to succeed.
6d16c2f8 3570 */
ac1970fb 3571void *address_space_map(AddressSpace *as,
a8170e5e
AK
3572 hwaddr addr,
3573 hwaddr *plen,
ac1970fb 3574 bool is_write)
6d16c2f8 3575{
ac1970fb 3576 AddressSpaceDispatch *d = as->dispatch;
a8170e5e
AK
3577 hwaddr len = *plen;
3578 hwaddr todo = 0;
6d16c2f8 3579 int l;
a8170e5e 3580 hwaddr page;
f3705d53 3581 MemoryRegionSection *section;
f15fbc4b 3582 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3583 ram_addr_t rlen;
3584 void *ret;
6d16c2f8
AL
3585
3586 while (len > 0) {
3587 page = addr & TARGET_PAGE_MASK;
3588 l = (page + TARGET_PAGE_SIZE) - addr;
3589 if (l > len)
3590 l = len;
ac1970fb 3591 section = phys_page_find(d, page >> TARGET_PAGE_BITS);
6d16c2f8 3592
f3705d53 3593 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3594 if (todo || bounce.buffer) {
6d16c2f8
AL
3595 break;
3596 }
3597 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3598 bounce.addr = addr;
3599 bounce.len = l;
3600 if (!is_write) {
ac1970fb 3601 address_space_read(as, addr, bounce.buffer, l);
6d16c2f8 3602 }
38bee5dc
SS
3603
3604 *plen = l;
3605 return bounce.buffer;
6d16c2f8 3606 }
8ab934f9 3607 if (!todo) {
f3705d53 3608 raddr = memory_region_get_ram_addr(section->mr)
cc5bea60 3609 + memory_region_section_addr(section, addr);
8ab934f9 3610 }
6d16c2f8
AL
3611
3612 len -= l;
3613 addr += l;
38bee5dc 3614 todo += l;
6d16c2f8 3615 }
8ab934f9
SS
3616 rlen = todo;
3617 ret = qemu_ram_ptr_length(raddr, &rlen);
3618 *plen = rlen;
3619 return ret;
6d16c2f8
AL
3620}
3621
ac1970fb 3622/* Unmaps a memory region previously mapped by address_space_map().
6d16c2f8
AL
3623 * Will also mark the memory as dirty if is_write == 1. access_len gives
3624 * the amount of memory that was actually read or written by the caller.
3625 */
a8170e5e
AK
3626void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
3627 int is_write, hwaddr access_len)
6d16c2f8
AL
3628{
3629 if (buffer != bounce.buffer) {
3630 if (is_write) {
e890261f 3631 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3632 while (access_len) {
3633 unsigned l;
3634 l = TARGET_PAGE_SIZE;
3635 if (l > access_len)
3636 l = access_len;
51d7a9eb 3637 invalidate_and_set_dirty(addr1, l);
6d16c2f8
AL
3638 addr1 += l;
3639 access_len -= l;
3640 }
3641 }
868bb33f 3642 if (xen_enabled()) {
e41d7c69 3643 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3644 }
6d16c2f8
AL
3645 return;
3646 }
3647 if (is_write) {
ac1970fb 3648 address_space_write(as, bounce.addr, bounce.buffer, access_len);
6d16c2f8 3649 }
f8a83245 3650 qemu_vfree(bounce.buffer);
6d16c2f8 3651 bounce.buffer = NULL;
ba223c29 3652 cpu_notify_map_clients();
6d16c2f8 3653}
d0ecd2aa 3654
a8170e5e
AK
3655void *cpu_physical_memory_map(hwaddr addr,
3656 hwaddr *plen,
ac1970fb
AK
3657 int is_write)
3658{
3659 return address_space_map(&address_space_memory, addr, plen, is_write);
3660}
3661
a8170e5e
AK
3662void cpu_physical_memory_unmap(void *buffer, hwaddr len,
3663 int is_write, hwaddr access_len)
ac1970fb
AK
3664{
3665 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
3666}
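/*
 * Illustrative sketch, not part of exec.c: the map/unmap pattern for
 * zero-copy access to guest memory.  address_space_map() may shorten
 * *plen (for instance when it falls back to the bounce buffer), so the
 * caller loops until the whole range has been handled.  The fill pattern
 * and addresses are invented for the example.
 */
#if 0   /* example only */
static void example_fill_guest_buffer(AddressSpace *as, hwaddr addr,
                                      hwaddr len, uint8_t pattern)
{
    while (len > 0) {
        hwaddr plen = len;
        void *p = address_space_map(as, addr, &plen, true /* is_write */);

        if (!p) {
            /* resources exhausted; see cpu_register_map_client() above */
            break;
        }
        memset(p, pattern, plen);
        /* access_len == plen: everything that was mapped was written */
        address_space_unmap(as, p, plen, 1, plen);

        addr += plen;
        len -= plen;
    }
}
#endif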
3667
8df1cd07 3668/* warning: addr must be aligned */
a8170e5e 3669static inline uint32_t ldl_phys_internal(hwaddr addr,
1e78bcc1 3670 enum device_endian endian)
8df1cd07 3671{
8df1cd07
FB
3672 uint8_t *ptr;
3673 uint32_t val;
f3705d53 3674 MemoryRegionSection *section;
8df1cd07 3675
ac1970fb 3676 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 3677
cc5bea60
BS
3678 if (!(memory_region_is_ram(section->mr) ||
3679 memory_region_is_romd(section->mr))) {
8df1cd07 3680 /* I/O case */
cc5bea60 3681 addr = memory_region_section_addr(section, addr);
37ec01d4 3682 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
3683#if defined(TARGET_WORDS_BIGENDIAN)
3684 if (endian == DEVICE_LITTLE_ENDIAN) {
3685 val = bswap32(val);
3686 }
3687#else
3688 if (endian == DEVICE_BIG_ENDIAN) {
3689 val = bswap32(val);
3690 }
3691#endif
8df1cd07
FB
3692 } else {
3693 /* RAM case */
f3705d53 3694 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3695 & TARGET_PAGE_MASK)
cc5bea60 3696 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3697 switch (endian) {
3698 case DEVICE_LITTLE_ENDIAN:
3699 val = ldl_le_p(ptr);
3700 break;
3701 case DEVICE_BIG_ENDIAN:
3702 val = ldl_be_p(ptr);
3703 break;
3704 default:
3705 val = ldl_p(ptr);
3706 break;
3707 }
8df1cd07
FB
3708 }
3709 return val;
3710}
3711
a8170e5e 3712uint32_t ldl_phys(hwaddr addr)
1e78bcc1
AG
3713{
3714 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3715}
3716
a8170e5e 3717uint32_t ldl_le_phys(hwaddr addr)
1e78bcc1
AG
3718{
3719 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3720}
3721
a8170e5e 3722uint32_t ldl_be_phys(hwaddr addr)
1e78bcc1
AG
3723{
3724 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3725}
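/*
 * Illustrative sketch, not part of exec.c: choosing between the ldl_*_phys
 * variants above.  ldl_phys() returns the value in target ("native") byte
 * order, while ldl_le_phys()/ldl_be_phys() are for data whose endianness
 * is fixed by a device or format regardless of the target.  The guest
 * address and field layout are invented.
 */
#if 0   /* example only */
static void example_read_descriptor_words(hwaddr desc_gpa)
{
    /* field defined as little-endian by a (hypothetical) device spec */
    uint32_t flags = ldl_le_phys(desc_gpa);

    /* value written by the guest CPU itself, so target byte order */
    uint32_t cookie = ldl_phys(desc_gpa + 4);

    (void)flags;
    (void)cookie;
}
#endif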
3726
84b7b8e7 3727/* warning: addr must be aligned */
a8170e5e 3728static inline uint64_t ldq_phys_internal(hwaddr addr,
1e78bcc1 3729 enum device_endian endian)
84b7b8e7 3730{
84b7b8e7
FB
3731 uint8_t *ptr;
3732 uint64_t val;
f3705d53 3733 MemoryRegionSection *section;
84b7b8e7 3734
ac1970fb 3735 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 3736
cc5bea60
BS
3737 if (!(memory_region_is_ram(section->mr) ||
3738 memory_region_is_romd(section->mr))) {
84b7b8e7 3739 /* I/O case */
cc5bea60 3740 addr = memory_region_section_addr(section, addr);
1e78bcc1
AG
3741
3742 /* XXX This is broken when device endian != cpu endian.
3743 Fix and add "endian" variable check */
84b7b8e7 3744#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3745 val = io_mem_read(section->mr, addr, 4) << 32;
3746 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 3747#else
37ec01d4
AK
3748 val = io_mem_read(section->mr, addr, 4);
3749 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
3750#endif
3751 } else {
3752 /* RAM case */
f3705d53 3753 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3754 & TARGET_PAGE_MASK)
cc5bea60 3755 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3756 switch (endian) {
3757 case DEVICE_LITTLE_ENDIAN:
3758 val = ldq_le_p(ptr);
3759 break;
3760 case DEVICE_BIG_ENDIAN:
3761 val = ldq_be_p(ptr);
3762 break;
3763 default:
3764 val = ldq_p(ptr);
3765 break;
3766 }
84b7b8e7
FB
3767 }
3768 return val;
3769}
3770
a8170e5e 3771uint64_t ldq_phys(hwaddr addr)
1e78bcc1
AG
3772{
3773 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3774}
3775
a8170e5e 3776uint64_t ldq_le_phys(hwaddr addr)
1e78bcc1
AG
3777{
3778 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3779}
3780
a8170e5e 3781uint64_t ldq_be_phys(hwaddr addr)
1e78bcc1
AG
3782{
3783 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3784}
3785
aab33094 3786/* XXX: optimize */
a8170e5e 3787uint32_t ldub_phys(hwaddr addr)
aab33094
FB
3788{
3789 uint8_t val;
3790 cpu_physical_memory_read(addr, &val, 1);
3791 return val;
3792}
3793
733f0b02 3794/* warning: addr must be aligned */
a8170e5e 3795static inline uint32_t lduw_phys_internal(hwaddr addr,
1e78bcc1 3796 enum device_endian endian)
aab33094 3797{
733f0b02
MT
3798 uint8_t *ptr;
3799 uint64_t val;
f3705d53 3800 MemoryRegionSection *section;
733f0b02 3801
ac1970fb 3802 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 3803
cc5bea60
BS
3804 if (!(memory_region_is_ram(section->mr) ||
3805 memory_region_is_romd(section->mr))) {
733f0b02 3806 /* I/O case */
cc5bea60 3807 addr = memory_region_section_addr(section, addr);
37ec01d4 3808 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
3809#if defined(TARGET_WORDS_BIGENDIAN)
3810 if (endian == DEVICE_LITTLE_ENDIAN) {
3811 val = bswap16(val);
3812 }
3813#else
3814 if (endian == DEVICE_BIG_ENDIAN) {
3815 val = bswap16(val);
3816 }
3817#endif
733f0b02
MT
3818 } else {
3819 /* RAM case */
f3705d53 3820 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3821 & TARGET_PAGE_MASK)
cc5bea60 3822 + memory_region_section_addr(section, addr));
1e78bcc1
AG
3823 switch (endian) {
3824 case DEVICE_LITTLE_ENDIAN:
3825 val = lduw_le_p(ptr);
3826 break;
3827 case DEVICE_BIG_ENDIAN:
3828 val = lduw_be_p(ptr);
3829 break;
3830 default:
3831 val = lduw_p(ptr);
3832 break;
3833 }
733f0b02
MT
3834 }
3835 return val;
aab33094
FB
3836}
3837
a8170e5e 3838uint32_t lduw_phys(hwaddr addr)
1e78bcc1
AG
3839{
3840 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3841}
3842
a8170e5e 3843uint32_t lduw_le_phys(hwaddr addr)
1e78bcc1
AG
3844{
3845 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3846}
3847
a8170e5e 3848uint32_t lduw_be_phys(hwaddr addr)
1e78bcc1
AG
3849{
3850 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3851}
3852
8df1cd07
FB
3853/* warning: addr must be aligned. The ram page is not marked as dirty
3854 and the code inside is not invalidated. It is useful if the dirty
3855 bits are used to track modified PTEs */
a8170e5e 3856void stl_phys_notdirty(hwaddr addr, uint32_t val)
8df1cd07 3857{
8df1cd07 3858 uint8_t *ptr;
f3705d53 3859 MemoryRegionSection *section;
8df1cd07 3860
ac1970fb 3861 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 3862
f3705d53 3863 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3864 addr = memory_region_section_addr(section, addr);
f3705d53 3865 if (memory_region_is_ram(section->mr)) {
37ec01d4 3866 section = &phys_sections[phys_section_rom];
06ef3525 3867 }
37ec01d4 3868 io_mem_write(section->mr, addr, val, 4);
8df1cd07 3869 } else {
f3705d53 3870 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 3871 & TARGET_PAGE_MASK)
cc5bea60 3872 + memory_region_section_addr(section, addr);
5579c7f3 3873 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3874 stl_p(ptr, val);
74576198
AL
3875
3876 if (unlikely(in_migration)) {
3877 if (!cpu_physical_memory_is_dirty(addr1)) {
3878 /* invalidate code */
3879 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3880 /* set dirty bit */
f7c11b53
YT
3881 cpu_physical_memory_set_dirty_flags(
3882 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
3883 }
3884 }
8df1cd07
FB
3885 }
3886}
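/*
 * Illustrative sketch, not part of exec.c: the use case hinted at in the
 * comment above -- updating a guest page-table entry during a software
 * page-table walk without marking the page dirty or invalidating
 * translated code.  The PTE address and bit mask are invented.
 */
#if 0   /* example only */
static void example_set_pte_accessed(hwaddr pte_addr, uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        /* write back without touching the dirty bitmap */
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}
#endif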
3887
a8170e5e 3888void stq_phys_notdirty(hwaddr addr, uint64_t val)
bc98a7ef 3889{
bc98a7ef 3890 uint8_t *ptr;
f3705d53 3891 MemoryRegionSection *section;
bc98a7ef 3892
ac1970fb 3893 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 3894
f3705d53 3895 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3896 addr = memory_region_section_addr(section, addr);
f3705d53 3897 if (memory_region_is_ram(section->mr)) {
37ec01d4 3898 section = &phys_sections[phys_section_rom];
06ef3525 3899 }
bc98a7ef 3900#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
3901 io_mem_write(section->mr, addr, val >> 32, 4);
3902 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 3903#else
37ec01d4
AK
3904 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3905 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
3906#endif
3907 } else {
f3705d53 3908 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 3909 & TARGET_PAGE_MASK)
cc5bea60 3910 + memory_region_section_addr(section, addr));
bc98a7ef
JM
3911 stq_p(ptr, val);
3912 }
3913}
3914
8df1cd07 3915/* warning: addr must be aligned */
a8170e5e 3916static inline void stl_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 3917 enum device_endian endian)
8df1cd07 3918{
8df1cd07 3919 uint8_t *ptr;
f3705d53 3920 MemoryRegionSection *section;
8df1cd07 3921
ac1970fb 3922 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
3b46e624 3923
f3705d53 3924 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3925 addr = memory_region_section_addr(section, addr);
f3705d53 3926 if (memory_region_is_ram(section->mr)) {
37ec01d4 3927 section = &phys_sections[phys_section_rom];
06ef3525 3928 }
1e78bcc1
AG
3929#if defined(TARGET_WORDS_BIGENDIAN)
3930 if (endian == DEVICE_LITTLE_ENDIAN) {
3931 val = bswap32(val);
3932 }
3933#else
3934 if (endian == DEVICE_BIG_ENDIAN) {
3935 val = bswap32(val);
3936 }
3937#endif
37ec01d4 3938 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
3939 } else {
3940 unsigned long addr1;
f3705d53 3941 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 3942 + memory_region_section_addr(section, addr);
8df1cd07 3943 /* RAM case */
5579c7f3 3944 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
3945 switch (endian) {
3946 case DEVICE_LITTLE_ENDIAN:
3947 stl_le_p(ptr, val);
3948 break;
3949 case DEVICE_BIG_ENDIAN:
3950 stl_be_p(ptr, val);
3951 break;
3952 default:
3953 stl_p(ptr, val);
3954 break;
3955 }
51d7a9eb 3956 invalidate_and_set_dirty(addr1, 4);
8df1cd07
FB
3957 }
3958}
3959
a8170e5e 3960void stl_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
3961{
3962 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3963}
3964
a8170e5e 3965void stl_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
3966{
3967 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3968}
3969
a8170e5e 3970void stl_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
3971{
3972 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3973}
3974
aab33094 3975/* XXX: optimize */
a8170e5e 3976void stb_phys(hwaddr addr, uint32_t val)
aab33094
FB
3977{
3978 uint8_t v = val;
3979 cpu_physical_memory_write(addr, &v, 1);
3980}
3981
733f0b02 3982/* warning: addr must be aligned */
a8170e5e 3983static inline void stw_phys_internal(hwaddr addr, uint32_t val,
1e78bcc1 3984 enum device_endian endian)
aab33094 3985{
733f0b02 3986 uint8_t *ptr;
f3705d53 3987 MemoryRegionSection *section;
733f0b02 3988
ac1970fb 3989 section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS);
733f0b02 3990
f3705d53 3991 if (!memory_region_is_ram(section->mr) || section->readonly) {
cc5bea60 3992 addr = memory_region_section_addr(section, addr);
f3705d53 3993 if (memory_region_is_ram(section->mr)) {
37ec01d4 3994 section = &phys_sections[phys_section_rom];
06ef3525 3995 }
1e78bcc1
AG
3996#if defined(TARGET_WORDS_BIGENDIAN)
3997 if (endian == DEVICE_LITTLE_ENDIAN) {
3998 val = bswap16(val);
3999 }
4000#else
4001 if (endian == DEVICE_BIG_ENDIAN) {
4002 val = bswap16(val);
4003 }
4004#endif
37ec01d4 4005 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4006 } else {
4007 unsigned long addr1;
f3705d53 4008 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
cc5bea60 4009 + memory_region_section_addr(section, addr);
733f0b02
MT
4010 /* RAM case */
4011 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4012 switch (endian) {
4013 case DEVICE_LITTLE_ENDIAN:
4014 stw_le_p(ptr, val);
4015 break;
4016 case DEVICE_BIG_ENDIAN:
4017 stw_be_p(ptr, val);
4018 break;
4019 default:
4020 stw_p(ptr, val);
4021 break;
4022 }
51d7a9eb 4023 invalidate_and_set_dirty(addr1, 2);
733f0b02 4024 }
aab33094
FB
4025}
4026
a8170e5e 4027void stw_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
4028{
4029 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4030}
4031
a8170e5e 4032void stw_le_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
4033{
4034 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4035}
4036
a8170e5e 4037void stw_be_phys(hwaddr addr, uint32_t val)
1e78bcc1
AG
4038{
4039 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4040}
4041
aab33094 4042/* XXX: optimize */
a8170e5e 4043void stq_phys(hwaddr addr, uint64_t val)
aab33094
FB
4044{
4045 val = tswap64(val);
71d2b725 4046 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4047}
4048
a8170e5e 4049void stq_le_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
4050{
4051 val = cpu_to_le64(val);
4052 cpu_physical_memory_write(addr, &val, 8);
4053}
4054
a8170e5e 4055void stq_be_phys(hwaddr addr, uint64_t val)
1e78bcc1
AG
4056{
4057 val = cpu_to_be64(val);
4058 cpu_physical_memory_write(addr, &val, 8);
4059}
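/*
 * Illustrative sketch, not part of exec.c: the store helpers mirror the
 * load helpers above -- stb/stw/stl/stq select the access width and the
 * _le/_be variants fix the byte order independently of the target.  The
 * mailbox layout is invented and mbox_gpa is assumed to be 8-byte aligned.
 */
#if 0   /* example only */
static void example_write_mailbox(hwaddr mbox_gpa)
{
    stb_phys(mbox_gpa + 0, 0x01);                   /* 8-bit flag */
    stw_le_phys(mbox_gpa + 2, 0x1234);              /* 16-bit LE field */
    stl_le_phys(mbox_gpa + 4, 0xdeadbeef);          /* 32-bit LE field */
    stq_be_phys(mbox_gpa + 8, UINT64_C(0x0102030405060708)); /* 64-bit BE */
}
#endif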
4060
5e2972fd 4061/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4062int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4063 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4064{
4065 int l;
a8170e5e 4066 hwaddr phys_addr;
9b3c35e0 4067 target_ulong page;
13eb76e0
FB
4068
4069 while (len > 0) {
4070 page = addr & TARGET_PAGE_MASK;
4071 phys_addr = cpu_get_phys_page_debug(env, page);
4072 /* if no physical page mapped, return an error */
4073 if (phys_addr == -1)
4074 return -1;
4075 l = (page + TARGET_PAGE_SIZE) - addr;
4076 if (l > len)
4077 l = len;
5e2972fd 4078 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4079 if (is_write)
4080 cpu_physical_memory_write_rom(phys_addr, buf, l);
4081 else
5e2972fd 4082 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4083 len -= l;
4084 buf += l;
4085 addr += l;
4086 }
4087 return 0;
4088}
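/*
 * Illustrative sketch, not part of exec.c: a debugger-style read of guest
 * virtual memory.  cpu_memory_rw_debug() resolves each page with
 * cpu_get_phys_page_debug(), so it takes virtual addresses, and its write
 * path goes through cpu_physical_memory_write_rom() (handy e.g. for
 * software breakpoints).  env, vaddr and len are assumed to come from the
 * caller.
 */
#if 0   /* example only */
static int example_peek_guest(CPUArchState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    /* returns -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* is_write */);
}
#endif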
a68fe89c 4089#endif
13eb76e0 4090
2e70f6ef
PB
4091/* in deterministic execution mode, instructions doing device I/Os
4092 must be at the end of the TB */
20503968 4093void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4094{
4095 TranslationBlock *tb;
4096 uint32_t n, cflags;
4097 target_ulong pc, cs_base;
4098 uint64_t flags;
4099
20503968 4100 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4101 if (!tb) {
4102 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4103 (void *)retaddr);
2e70f6ef
PB
4104 }
4105 n = env->icount_decr.u16.low + tb->icount;
20503968 4106 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4107 /* Calculate how many instructions had been executed before the fault
bf20dc07 4108 occurred. */
2e70f6ef
PB
4109 n = n - env->icount_decr.u16.low;
4110 /* Generate a new TB ending on the I/O insn. */
4111 n++;
4112 /* On MIPS and SH, delay slot instructions can only be restarted if
4113 they were already the first instruction in the TB. If this is not
bf20dc07 4114 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4115 branch. */
4116#if defined(TARGET_MIPS)
4117 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4118 env->active_tc.PC -= 4;
4119 env->icount_decr.u16.low++;
4120 env->hflags &= ~MIPS_HFLAG_BMASK;
4121 }
4122#elif defined(TARGET_SH4)
4123 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4124 && n > 1) {
4125 env->pc -= 2;
4126 env->icount_decr.u16.low++;
4127 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4128 }
4129#endif
4130 /* This should never happen. */
4131 if (n > CF_COUNT_MASK)
4132 cpu_abort(env, "TB too big during recompile");
4133
4134 cflags = n | CF_LAST_IO;
4135 pc = tb->pc;
4136 cs_base = tb->cs_base;
4137 flags = tb->flags;
4138 tb_phys_invalidate(tb, -1);
4139 /* FIXME: In theory this could raise an exception. In practice
4140 we have already translated the block once so it's probably ok. */
4141 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4142 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4143 the first in the TB) then we end up generating a whole new TB and
4144 repeating the fault, which is horribly inefficient.
4145 Better would be to execute just this insn uncached, or generate a
4146 second new TB. */
4147 cpu_resume_from_signal(env, NULL);
4148}
4149
b3755a91
PB
4150#if !defined(CONFIG_USER_ONLY)
4151
055403b2 4152void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4153{
4154 int i, target_code_size, max_target_code_size;
4155 int direct_jmp_count, direct_jmp2_count, cross_page;
4156 TranslationBlock *tb;
3b46e624 4157
e3db7226
FB
4158 target_code_size = 0;
4159 max_target_code_size = 0;
4160 cross_page = 0;
4161 direct_jmp_count = 0;
4162 direct_jmp2_count = 0;
4163 for(i = 0; i < nb_tbs; i++) {
4164 tb = &tbs[i];
4165 target_code_size += tb->size;
4166 if (tb->size > max_target_code_size)
4167 max_target_code_size = tb->size;
4168 if (tb->page_addr[1] != -1)
4169 cross_page++;
4170 if (tb->tb_next_offset[0] != 0xffff) {
4171 direct_jmp_count++;
4172 if (tb->tb_next_offset[1] != 0xffff) {
4173 direct_jmp2_count++;
4174 }
4175 }
4176 }
4177 /* XXX: avoid using doubles ? */
57fec1fe 4178 cpu_fprintf(f, "Translation buffer state:\n");
f1bc0bcc 4179 cpu_fprintf(f, "gen code size %td/%zd\n",
26a5f13b
FB
4180 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4181 cpu_fprintf(f, "TB count %d/%d\n",
4182 nb_tbs, code_gen_max_blocks);
5fafdf24 4183 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4184 nb_tbs ? target_code_size / nb_tbs : 0,
4185 max_target_code_size);
055403b2 4186 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4187 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4188 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4189 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4190 cross_page,
e3db7226
FB
4191 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4192 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4193 direct_jmp_count,
e3db7226
FB
4194 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4195 direct_jmp2_count,
4196 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4197 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4198 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4199 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4200 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4201 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4202}
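/*
 * Illustrative sketch, not part of exec.c: dump_exec_info() only needs a
 * FILE pointer and a printf-like callback, so plain fprintf() is enough
 * for ad-hoc debugging; callers such as the monitor pass their own
 * fprintf-compatible callback instead.
 */
#if 0   /* example only */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif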
4203
82afa586
BH
4204/*
4205 * A helper function for the _utterly broken_ virtio device model to find out if
4206 * it's running on a big endian machine. Don't do this at home kids!
4207 */
4208bool virtio_is_big_endian(void);
4209bool virtio_is_big_endian(void)
4210{
4211#if defined(TARGET_WORDS_BIGENDIAN)
4212 return true;
4213#else
4214 return false;
4215#endif
4216}
4217
61382a50 4218#endif
76f35538
WC
4219
4220#ifndef CONFIG_USER_ONLY
a8170e5e 4221bool cpu_physical_memory_is_io(hwaddr phys_addr)
76f35538
WC
4222{
4223 MemoryRegionSection *section;
4224
ac1970fb
AK
4225 section = phys_page_find(address_space_memory.dispatch,
4226 phys_addr >> TARGET_PAGE_BITS);
76f35538
WC
4227
4228 return !(memory_region_is_ram(section->mr) ||
4229 memory_region_is_romd(section->mr));
4230}
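/*
 * Illustrative sketch, not part of exec.c: cpu_physical_memory_is_io()
 * lets a caller (e.g. memory dumping code) tell RAM/ROM-device backed
 * pages apart from MMIO before deciding how to access them.  The helper
 * name is invented for the example.
 */
#if 0   /* example only */
static bool example_backed_by_ram(hwaddr gpa)
{
    /* false for MMIO, true for RAM or ROM-device backed pages */
    return !cpu_physical_memory_is_io(gpa);
}
#endif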
4231#endif