/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section \
    __attribute__((aligned (16)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses. */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables. */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif

#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
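/* A page index is consumed V_L1_BITS at the top level of l1_map and
   L2_BITS per lower level; V_L1_SHIFT is how far the index must be
   shifted right to obtain the level-1 slot (see page_find_alloc). */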

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc. */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
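/* PhysPageEntry.ptr is only 15 bits wide, so the all-ones value above
   is reserved as the "no node allocated yet" marker. */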

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections. */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

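/* Walk (and optionally populate) the l1_map radix tree: one V_L1_BITS
   lookup at the top level, then L2_BITS per lower level, ending at a
   PageDesc for the given page index. */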
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1. Always allocated. */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

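/* Descend the physical page map level by level, allocating intermediate
   nodes on demand and marking leaves for the [*index, *index + *nb)
   range with the given section number. */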
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

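/* Look up the MemoryRegionSection covering a physical page index;
   a missing node at any level falls back to phys_section_unassigned. */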
static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches. */
        /* We have a +- 4GB range on the branches; leave some slop. */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

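/* The two low bits of the pointers stored in PageDesc.first_tb and
   TranslationBlock.page_next encode which of the TB's (up to two)
   pages the link belongs to; mask with ~3 to recover the TB pointer. */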
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

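/* Mark bits [start, start + len) in a byte-granular bitmap; used by
   build_page_bitmap() to record which bytes of a page hold translated
   code. */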
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

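/* Translate one guest block. If the TB array or code buffer is full,
   tb_alloc() fails, everything is flushed with tb_flush() and the
   allocation is retried (it cannot fail on an empty buffer). */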
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    addr = cpu_get_phys_page_debug(env, pc);
    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
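/* Watchpoint handling for system emulation: lengths must be powers of
   two and aligned, since address matching is done with a simple
   len_mask comparison. */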
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

9349b4f9 1697static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1698{
3098dba0
AJ
1699 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1700 problem and hope the cpu will stop of its own accord. For userspace
1701 emulation this often isn't actually as bad as it sounds. Often
1702 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1703 TranslationBlock *tb;
c227f099 1704 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1705
cab1b4bd 1706 spin_lock(&interrupt_lock);
3098dba0
AJ
1707 tb = env->current_tb;
1708 /* if the cpu is currently executing code, we must unlink it and
1709 all the potentially executing TB */
f76cfe56 1710 if (tb) {
3098dba0
AJ
1711 env->current_tb = NULL;
1712 tb_reset_jump_recursive(tb);
be214e6c 1713 }
cab1b4bd 1714 spin_unlock(&interrupt_lock);
3098dba0
AJ
1715}
1716
97ffbd8d 1717#ifndef CONFIG_USER_ONLY
3098dba0 1718/* mask must never be zero, except for A20 change call */
9349b4f9 1719static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1720{
1721 int old_mask;
be214e6c 1722
2e70f6ef 1723 old_mask = env->interrupt_request;
68a79315 1724 env->interrupt_request |= mask;
3098dba0 1725
8edac960
AL
1726 /*
1727 * If called from iothread context, wake the target cpu in
1728 * case its halted.
1729 */
b7680cb6 1730 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1731 qemu_cpu_kick(env);
1732 return;
1733 }
8edac960 1734
2e70f6ef 1735 if (use_icount) {
266910c4 1736 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1737 if (!can_do_io(env)
be214e6c 1738 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1739 cpu_abort(env, "Raised interrupt while not in I/O function");
1740 }
2e70f6ef 1741 } else {
3098dba0 1742 cpu_unlink_tb(env);
ea041c0e
FB
1743 }
1744}
1745
ec6959d0
JK
1746CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1747
97ffbd8d
JK
1748#else /* CONFIG_USER_ONLY */
1749
9349b4f9 1750void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1751{
1752 env->interrupt_request |= mask;
1753 cpu_unlink_tb(env);
1754}
1755#endif /* CONFIG_USER_ONLY */
1756
9349b4f9 1757void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1758{
1759 env->interrupt_request &= ~mask;
1760}
1761
9349b4f9 1762void cpu_exit(CPUArchState *env)
3098dba0
AJ
1763{
1764 env->exit_request = 1;
1765 cpu_unlink_tb(env);
1766}
1767
c7cd6a37 1768const CPULogItem cpu_log_items[] = {
5fafdf24 1769 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1770 "show generated host assembly code for each compiled TB" },
1771 { CPU_LOG_TB_IN_ASM, "in_asm",
1772 "show target assembly code for each compiled TB" },
5fafdf24 1773 { CPU_LOG_TB_OP, "op",
57fec1fe 1774 "show micro ops for each compiled TB" },
f193c797 1775 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1776 "show micro ops "
1777#ifdef TARGET_I386
1778 "before eflags optimization and "
f193c797 1779#endif
e01a1157 1780 "after liveness analysis" },
f193c797
FB
1781 { CPU_LOG_INT, "int",
1782 "show interrupts/exceptions in short format" },
1783 { CPU_LOG_EXEC, "exec",
1784 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1785 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1786 "show CPU state before block translation" },
f193c797
FB
1787#ifdef TARGET_I386
1788 { CPU_LOG_PCALL, "pcall",
1789 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1790 { CPU_LOG_RESET, "cpu_reset",
1791 "show CPU state before CPU resets" },
f193c797 1792#endif
8e3a9fd2 1793#ifdef DEBUG_IOPORT
fd872598
FB
1794 { CPU_LOG_IOPORT, "ioport",
1795 "show all i/o ports accesses" },
8e3a9fd2 1796#endif
f193c797
FB
1797 { 0, NULL, NULL },
1798};
1799
1800static int cmp1(const char *s1, int n, const char *s2)
1801{
1802 if (strlen(s2) != n)
1803 return 0;
1804 return memcmp(s1, s2, n) == 0;
1805}
3b46e624 1806
f193c797
FB
1807/* takes a comma separated list of log masks. Return 0 if error. */
1808int cpu_str_to_log_mask(const char *str)
1809{
c7cd6a37 1810 const CPULogItem *item;
f193c797
FB
1811 int mask;
1812 const char *p, *p1;
1813
1814 p = str;
1815 mask = 0;
1816 for(;;) {
1817 p1 = strchr(p, ',');
1818 if (!p1)
1819 p1 = p + strlen(p);
9742bf26
YT
1820 if(cmp1(p,p1-p,"all")) {
1821 for(item = cpu_log_items; item->mask != 0; item++) {
1822 mask |= item->mask;
1823 }
1824 } else {
1825 for(item = cpu_log_items; item->mask != 0; item++) {
1826 if (cmp1(p, p1 - p, item->name))
1827 goto found;
1828 }
1829 return 0;
f193c797 1830 }
f193c797
FB
1831 found:
1832 mask |= item->mask;
1833 if (*p1 != ',')
1834 break;
1835 p = p1 + 1;
1836 }
1837 return mask;
1838}
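/*
 * Usage sketch added for illustration (not part of the original file): how
 * the two helpers above are combined when a "-d" style option string is
 * parsed; the option string is hypothetical.
 *
 *     int mask = cpu_str_to_log_mask("in_asm,cpu");
 *     if (mask == 0) {
 *         fprintf(stderr, "unknown log item\n");    // parse error
 *     } else {
 *         cpu_set_log(mask);                        // opens the log file
 *     }
 */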
ea041c0e 1839
9349b4f9 1840void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1841{
1842 va_list ap;
493ae1f0 1843 va_list ap2;
7501267e
FB
1844
1845 va_start(ap, fmt);
493ae1f0 1846 va_copy(ap2, ap);
7501267e
FB
1847 fprintf(stderr, "qemu: fatal: ");
1848 vfprintf(stderr, fmt, ap);
1849 fprintf(stderr, "\n");
1850#ifdef TARGET_I386
7fe48483
FB
1851 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1852#else
1853 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1854#endif
93fcfe39
AL
1855 if (qemu_log_enabled()) {
1856 qemu_log("qemu: fatal: ");
1857 qemu_log_vprintf(fmt, ap2);
1858 qemu_log("\n");
f9373291 1859#ifdef TARGET_I386
93fcfe39 1860 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1861#else
93fcfe39 1862 log_cpu_state(env, 0);
f9373291 1863#endif
31b1a7b4 1864 qemu_log_flush();
93fcfe39 1865 qemu_log_close();
924edcae 1866 }
493ae1f0 1867 va_end(ap2);
f9373291 1868 va_end(ap);
fd052bf6
RV
1869#if defined(CONFIG_USER_ONLY)
1870 {
1871 struct sigaction act;
1872 sigfillset(&act.sa_mask);
1873 act.sa_handler = SIG_DFL;
1874 sigaction(SIGABRT, &act, NULL);
1875 }
1876#endif
7501267e
FB
1877 abort();
1878}
1879
9349b4f9 1880CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1881{
9349b4f9
AF
1882 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1883 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1884 int cpu_index = new_env->cpu_index;
5a38f081
AL
1885#if defined(TARGET_HAS_ICE)
1886 CPUBreakpoint *bp;
1887 CPUWatchpoint *wp;
1888#endif
1889
9349b4f9 1890 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1891
1892 /* Preserve chaining and index. */
c5be9f08
TS
1893 new_env->next_cpu = next_cpu;
1894 new_env->cpu_index = cpu_index;
5a38f081
AL
1895
1896 /* Clone all break/watchpoints.
1897 Note: Once we support ptrace with hw-debug register access, make sure
1898 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1899 QTAILQ_INIT(&env->breakpoints);
1900 QTAILQ_INIT(&env->watchpoints);
5a38f081 1901#if defined(TARGET_HAS_ICE)
72cf2d4f 1902 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1903 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1904 }
72cf2d4f 1905 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1906 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1907 wp->flags, NULL);
1908 }
1909#endif
1910
c5be9f08
TS
1911 return new_env;
1912}
1913
0124311e
FB
1914#if !defined(CONFIG_USER_ONLY)
1915
9349b4f9 1916static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1917{
1918 unsigned int i;
1919
1920 /* Discard jump cache entries for any tb which might potentially
1921 overlap the flushed page. */
1922 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1923 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1924 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1925
1926 i = tb_jmp_cache_hash_page(addr);
1927 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1928 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1929}
1930
08738984
IK
1931static CPUTLBEntry s_cputlb_empty_entry = {
1932 .addr_read = -1,
1933 .addr_write = -1,
1934 .addr_code = -1,
1935 .addend = -1,
1936};
1937
771124e1
PM
1938/* NOTE:
1939 * If flush_global is true (the usual case), flush all tlb entries.
1940 * If flush_global is false, flush (at least) all tlb entries not
1941 * marked global.
1942 *
1943 * Since QEMU doesn't currently implement a global/not-global flag
1944 * for tlb entries, at the moment tlb_flush() will also flush all
1945 * tlb entries in the flush_global == false case. This is OK because
1946 * CPU architectures generally permit an implementation to drop
1947 * entries from the TLB at any time, so flushing more entries than
1948 * required is only an efficiency issue, not a correctness issue.
1949 */
9349b4f9 1950void tlb_flush(CPUArchState *env, int flush_global)
33417e70 1951{
33417e70 1952 int i;
0124311e 1953
9fa3e853
FB
1954#if defined(DEBUG_TLB)
1955 printf("tlb_flush:\n");
1956#endif
0124311e
FB
1957 /* must reset current TB so that interrupts cannot modify the
1958 links while we are modifying them */
1959 env->current_tb = NULL;
1960
33417e70 1961 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1962 int mmu_idx;
1963 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1964 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1965 }
33417e70 1966 }
9fa3e853 1967
8a40a180 1968 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1969
d4c430a8
PB
1970 env->tlb_flush_addr = -1;
1971 env->tlb_flush_mask = 0;
e3db7226 1972 tlb_flush_count++;
33417e70
FB
1973}
1974
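/*
 * Usage sketch added for illustration (not part of the original file):
 * targets call tlb_flush() when a whole address space becomes stale (for
 * instance after a page-table base switch) and tlb_flush_page() below when
 * only a single mapping is invalidated.
 *
 *     tlb_flush(env, 1);                             // drop every cached entry
 *     tlb_flush_page(env, vaddr & TARGET_PAGE_MASK); // drop one page only
 */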
274da6b2 1975static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1976{
5fafdf24 1977 if (addr == (tlb_entry->addr_read &
84b7b8e7 1978 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1979 addr == (tlb_entry->addr_write &
84b7b8e7 1980 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1981 addr == (tlb_entry->addr_code &
84b7b8e7 1982 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1983 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1984 }
61382a50
FB
1985}
1986
9349b4f9 1987void tlb_flush_page(CPUArchState *env, target_ulong addr)
33417e70 1988{
8a40a180 1989 int i;
cfde4bd9 1990 int mmu_idx;
0124311e 1991
9fa3e853 1992#if defined(DEBUG_TLB)
108c49b8 1993 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1994#endif
d4c430a8
PB
1995 /* Check if we need to flush due to large pages. */
1996 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1997#if defined(DEBUG_TLB)
1998 printf("tlb_flush_page: forced full flush ("
1999 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2000 env->tlb_flush_addr, env->tlb_flush_mask);
2001#endif
2002 tlb_flush(env, 1);
2003 return;
2004 }
0124311e
FB
2005 /* must reset current TB so that interrupts cannot modify the
2006 links while we are modifying them */
2007 env->current_tb = NULL;
61382a50
FB
2008
2009 addr &= TARGET_PAGE_MASK;
2010 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2011 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2012 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2013
5c751e99 2014 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2015}
2016
9fa3e853
FB
2017/* update the TLBs so that writes to code in the virtual page 'addr'
2018 can be detected */
c227f099 2019static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2020{
5fafdf24 2021 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2022 ram_addr + TARGET_PAGE_SIZE,
2023 CODE_DIRTY_FLAG);
9fa3e853
FB
2024}
2025
9fa3e853 2026/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2027 tested for self modifying code */
9349b4f9 2028static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 2029 target_ulong vaddr)
9fa3e853 2030{
f7c11b53 2031 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2032}
2033
5fafdf24 2034static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2035 unsigned long start, unsigned long length)
2036{
2037 unsigned long addr;
0e0df1e2 2038 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 2039 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2040 if ((addr - start) < length) {
0f459d16 2041 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2042 }
2043 }
2044}
2045
5579c7f3 2046/* Note: start and end must be within the same ram block. */
c227f099 2047void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2048 int dirty_flags)
1ccde1cb 2049{
9349b4f9 2050 CPUArchState *env;
4f2ac237 2051 unsigned long length, start1;
f7c11b53 2052 int i;
1ccde1cb
FB
2053
2054 start &= TARGET_PAGE_MASK;
2055 end = TARGET_PAGE_ALIGN(end);
2056
2057 length = end - start;
2058 if (length == 0)
2059 return;
f7c11b53 2060 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2061
1ccde1cb
FB
2062 /* we modify the TLB cache so that the dirty bit will be set again
2063 when accessing the range */
b2e0a138 2064 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2065 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2066 address comparisons below. */
b2e0a138 2067 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2068 != (end - 1) - start) {
2069 abort();
2070 }
2071
6a00d601 2072 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2073 int mmu_idx;
2074 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2075 for(i = 0; i < CPU_TLB_SIZE; i++)
2076 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2077 start1, length);
2078 }
6a00d601 2079 }
1ccde1cb
FB
2080}
2081
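/*
 * Usage sketch added for illustration (not part of the original file):
 * dirty-tracking clients clear their own flag once they have consumed the
 * information, e.g. a display device after redrawing from guest video RAM.
 * VGA_DIRTY_FLAG and the address values are examples only.
 *
 *     cpu_physical_memory_reset_dirty(vram_base,
 *                                     vram_base + vram_size,
 *                                     VGA_DIRTY_FLAG);
 */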
74576198
AL
2082int cpu_physical_memory_set_dirty_tracking(int enable)
2083{
f6f3fbca 2084 int ret = 0;
74576198 2085 in_migration = enable;
f6f3fbca 2086 return ret;
74576198
AL
2087}
2088
3a7d929e
FB
2089static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2090{
c227f099 2091 ram_addr_t ram_addr;
5579c7f3 2092 void *p;
3a7d929e 2093
0e0df1e2 2094 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2095 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2096 + tlb_entry->addend);
e890261f 2097 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2098 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2099 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2100 }
2101 }
2102}
2103
2104/* update the TLB according to the current state of the dirty bits */
9349b4f9 2105void cpu_tlb_update_dirty(CPUArchState *env)
3a7d929e
FB
2106{
2107 int i;
cfde4bd9
IY
2108 int mmu_idx;
2109 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2110 for(i = 0; i < CPU_TLB_SIZE; i++)
2111 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2112 }
3a7d929e
FB
2113}
2114
0f459d16 2115static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2116{
0f459d16
PB
2117 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2118 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2119}
2120
0f459d16
PB
2121/* update the TLB corresponding to virtual page vaddr
2122 so that it is no longer dirty */
9349b4f9 2123static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
1ccde1cb 2124{
1ccde1cb 2125 int i;
cfde4bd9 2126 int mmu_idx;
1ccde1cb 2127
0f459d16 2128 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2129 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2130 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2131 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2132}
2133
d4c430a8
PB
2134/* Our TLB does not support large pages, so remember the area covered by
2135 large pages and trigger a full TLB flush if these are invalidated. */
9349b4f9 2136static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2137 target_ulong size)
2138{
2139 target_ulong mask = ~(size - 1);
2140
2141 if (env->tlb_flush_addr == (target_ulong)-1) {
2142 env->tlb_flush_addr = vaddr & mask;
2143 env->tlb_flush_mask = mask;
2144 return;
2145 }
2146 /* Extend the existing region to include the new page.
2147 This is a compromise between unnecessary flushes and the cost
2148 of maintaining a full variable size TLB. */
2149 mask &= env->tlb_flush_mask;
2150 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2151 mask <<= 1;
2152 }
2153 env->tlb_flush_addr &= mask;
2154 env->tlb_flush_mask = mask;
2155}
2156
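/*
 * Worked example added for illustration (not part of the original file):
 * assume 2 MB pages are entered at 0x00200000 and then at 0x00600000.  The
 * first call records tlb_flush_addr=0x00200000, tlb_flush_mask=0xffe00000.
 * For the second call, 0x00200000 ^ 0x00600000 = 0x00400000, so the mask is
 * widened to 0xff800000 and tlb_flush_addr becomes 0; from then on a
 * tlb_flush_page() for any address below 8 MB triggers a full flush.
 */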
06ef3525 2157static bool is_ram_rom(MemoryRegionSection *s)
1d393fa2 2158{
06ef3525 2159 return memory_region_is_ram(s->mr);
1d393fa2
AK
2160}
2161
06ef3525 2162static bool is_romd(MemoryRegionSection *s)
75c578dc 2163{
06ef3525 2164 MemoryRegion *mr = s->mr;
75c578dc 2165
75c578dc
AK
2166 return mr->rom_device && mr->readable;
2167}
2168
06ef3525 2169static bool is_ram_rom_romd(MemoryRegionSection *s)
1d393fa2 2170{
06ef3525 2171 return is_ram_rom(s) || is_romd(s);
1d393fa2
AK
2172}
2173
d4c430a8
PB
2174/* Add a new TLB entry. At most one entry for a given virtual address
2175 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2176 supplied size is only used by tlb_flush_page. */
9349b4f9 2177void tlb_set_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2178 target_phys_addr_t paddr, int prot,
2179 int mmu_idx, target_ulong size)
9fa3e853 2180{
f3705d53 2181 MemoryRegionSection *section;
9fa3e853 2182 unsigned int index;
4f2ac237 2183 target_ulong address;
0f459d16 2184 target_ulong code_address;
355b1943 2185 unsigned long addend;
84b7b8e7 2186 CPUTLBEntry *te;
a1d1bb31 2187 CPUWatchpoint *wp;
c227f099 2188 target_phys_addr_t iotlb;
9fa3e853 2189
d4c430a8
PB
2190 assert(size >= TARGET_PAGE_SIZE);
2191 if (size != TARGET_PAGE_SIZE) {
2192 tlb_add_large_page(env, vaddr, size);
2193 }
06ef3525 2194 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853 2195#if defined(DEBUG_TLB)
7fd3f494
SW
2196 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2197 " prot=%x idx=%d\n",
2198 vaddr, paddr, prot, mmu_idx);
9fa3e853
FB
2199#endif
2200
0f459d16 2201 address = vaddr;
f3705d53 2202 if (!is_ram_rom_romd(section)) {
0f459d16
PB
2203 /* IO memory case (romd handled later) */
2204 address |= TLB_MMIO;
2205 }
f3705d53
AK
2206 if (is_ram_rom_romd(section)) {
2207 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2208 + section_addr(section, paddr);
06ef3525
AK
2209 } else {
2210 addend = 0;
2211 }
f3705d53 2212 if (is_ram_rom(section)) {
0f459d16 2213 /* Normal RAM. */
f3705d53
AK
2214 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2215 + section_addr(section, paddr);
2216 if (!section->readonly)
aa102231 2217 iotlb |= phys_section_notdirty;
0f459d16 2218 else
aa102231 2219 iotlb |= phys_section_rom;
0f459d16 2220 } else {
ccbb4d44 2221 /* IO handlers are currently passed a physical address.
0f459d16
PB
2222 It would be nice to pass an offset from the base address
2223 of that region. This would avoid having to special case RAM,
2224 and avoid full address decoding in every device.
2225 We can't use the high bits of pd for this because
2226 IO_MEM_ROMD uses these as a ram address. */
aa102231 2227 iotlb = section - phys_sections;
f3705d53 2228 iotlb += section_addr(section, paddr);
0f459d16
PB
2229 }
2230
2231 code_address = address;
2232 /* Make accesses to pages with watchpoints go via the
2233 watchpoint trap routines. */
72cf2d4f 2234 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2235 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2236 /* Avoid trapping reads of pages with a write breakpoint. */
2237 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
aa102231 2238 iotlb = phys_section_watch + paddr;
bf298f83
JK
2239 address |= TLB_MMIO;
2240 break;
2241 }
6658ffb8 2242 }
0f459d16 2243 }
d79acba4 2244
0f459d16
PB
2245 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2246 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2247 te = &env->tlb_table[mmu_idx][index];
2248 te->addend = addend - vaddr;
2249 if (prot & PAGE_READ) {
2250 te->addr_read = address;
2251 } else {
2252 te->addr_read = -1;
2253 }
5c751e99 2254
0f459d16
PB
2255 if (prot & PAGE_EXEC) {
2256 te->addr_code = code_address;
2257 } else {
2258 te->addr_code = -1;
2259 }
2260 if (prot & PAGE_WRITE) {
f3705d53
AK
2261 if ((memory_region_is_ram(section->mr) && section->readonly)
2262 || is_romd(section)) {
0f459d16
PB
2263 /* Write access calls the I/O callback. */
2264 te->addr_write = address | TLB_MMIO;
f3705d53 2265 } else if (memory_region_is_ram(section->mr)
06ef3525 2266 && !cpu_physical_memory_is_dirty(
f3705d53
AK
2267 section->mr->ram_addr
2268 + section_addr(section, paddr))) {
0f459d16 2269 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2270 } else {
0f459d16 2271 te->addr_write = address;
9fa3e853 2272 }
0f459d16
PB
2273 } else {
2274 te->addr_write = -1;
9fa3e853 2275 }
9fa3e853
FB
2276}
2277
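/*
 * Usage sketch added for illustration (not part of the original file): a
 * target's MMU fault handler resolves vaddr to paddr from its page tables
 * and then installs the translation.  example_walk_page_table() is a
 * hypothetical helper standing in for the target-specific walker.
 *
 *     int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 *     target_phys_addr_t paddr = example_walk_page_table(env, vaddr, &prot);
 *     tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  prot, mmu_idx, TARGET_PAGE_SIZE);
 */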
0124311e
FB
2278#else
2279
9349b4f9 2280void tlb_flush(CPUArchState *env, int flush_global)
0124311e
FB
2281{
2282}
2283
9349b4f9 2284void tlb_flush_page(CPUArchState *env, target_ulong addr)
0124311e
FB
2285{
2286}
2287
edf8e2af
MW
2288/*
2289 * Walks guest process memory "regions" one by one
2290 * and calls callback function 'fn' for each region.
2291 */
5cd2c5b6
RH
2292
2293struct walk_memory_regions_data
2294{
2295 walk_memory_regions_fn fn;
2296 void *priv;
2297 unsigned long start;
2298 int prot;
2299};
2300
2301static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2302 abi_ulong end, int new_prot)
5cd2c5b6
RH
2303{
2304 if (data->start != -1ul) {
2305 int rc = data->fn(data->priv, data->start, end, data->prot);
2306 if (rc != 0) {
2307 return rc;
2308 }
2309 }
2310
2311 data->start = (new_prot ? end : -1ul);
2312 data->prot = new_prot;
2313
2314 return 0;
2315}
2316
2317static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2318 abi_ulong base, int level, void **lp)
5cd2c5b6 2319{
b480d9b7 2320 abi_ulong pa;
5cd2c5b6
RH
2321 int i, rc;
2322
2323 if (*lp == NULL) {
2324 return walk_memory_regions_end(data, base, 0);
2325 }
2326
2327 if (level == 0) {
2328 PageDesc *pd = *lp;
7296abac 2329 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2330 int prot = pd[i].flags;
2331
2332 pa = base | (i << TARGET_PAGE_BITS);
2333 if (prot != data->prot) {
2334 rc = walk_memory_regions_end(data, pa, prot);
2335 if (rc != 0) {
2336 return rc;
9fa3e853 2337 }
9fa3e853 2338 }
5cd2c5b6
RH
2339 }
2340 } else {
2341 void **pp = *lp;
7296abac 2342 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2343 pa = base | ((abi_ulong)i <<
2344 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2345 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2346 if (rc != 0) {
2347 return rc;
2348 }
2349 }
2350 }
2351
2352 return 0;
2353}
2354
2355int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2356{
2357 struct walk_memory_regions_data data;
2358 unsigned long i;
2359
2360 data.fn = fn;
2361 data.priv = priv;
2362 data.start = -1ul;
2363 data.prot = 0;
2364
2365 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2366 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2367 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2368 if (rc != 0) {
2369 return rc;
9fa3e853 2370 }
33417e70 2371 }
5cd2c5b6
RH
2372
2373 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2374}
2375
b480d9b7
PB
2376static int dump_region(void *priv, abi_ulong start,
2377 abi_ulong end, unsigned long prot)
edf8e2af
MW
2378{
2379 FILE *f = (FILE *)priv;
2380
b480d9b7
PB
2381 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2382 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2383 start, end, end - start,
2384 ((prot & PAGE_READ) ? 'r' : '-'),
2385 ((prot & PAGE_WRITE) ? 'w' : '-'),
2386 ((prot & PAGE_EXEC) ? 'x' : '-'));
2387
2388 return (0);
2389}
2390
2391/* dump memory mappings */
2392void page_dump(FILE *f)
2393{
2394 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2395 "start", "end", "size", "prot");
2396 walk_memory_regions(f, dump_region);
33417e70
FB
2397}
2398
53a5960a 2399int page_get_flags(target_ulong address)
33417e70 2400{
9fa3e853
FB
2401 PageDesc *p;
2402
2403 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2404 if (!p)
9fa3e853
FB
2405 return 0;
2406 return p->flags;
2407}
2408
376a7909
RH
2409/* Modify the flags of a page and invalidate the code if necessary.
2410 The flag PAGE_WRITE_ORG is positioned automatically depending
2411 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2412void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2413{
376a7909
RH
2414 target_ulong addr, len;
2415
2416 /* This function should never be called with addresses outside the
2417 guest address space. If this assert fires, it probably indicates
2418 a missing call to h2g_valid. */
b480d9b7
PB
2419#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2420 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2421#endif
2422 assert(start < end);
9fa3e853
FB
2423
2424 start = start & TARGET_PAGE_MASK;
2425 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2426
2427 if (flags & PAGE_WRITE) {
9fa3e853 2428 flags |= PAGE_WRITE_ORG;
376a7909
RH
2429 }
2430
2431 for (addr = start, len = end - start;
2432 len != 0;
2433 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2434 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2435
2436 /* If the write protection bit is set, then we invalidate
2437 the code inside. */
5fafdf24 2438 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2439 (flags & PAGE_WRITE) &&
2440 p->first_tb) {
d720b93d 2441 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2442 }
2443 p->flags = flags;
2444 }
33417e70
FB
2445}
2446
3d97b40b
TS
2447int page_check_range(target_ulong start, target_ulong len, int flags)
2448{
2449 PageDesc *p;
2450 target_ulong end;
2451 target_ulong addr;
2452
376a7909
RH
2453 /* This function should never be called with addresses outside the
2454 guest address space. If this assert fires, it probably indicates
2455 a missing call to h2g_valid. */
338e9e6c
BS
2456#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2457 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2458#endif
2459
3e0650a9
RH
2460 if (len == 0) {
2461 return 0;
2462 }
376a7909
RH
2463 if (start + len - 1 < start) {
2464 /* We've wrapped around. */
55f280c9 2465 return -1;
376a7909 2466 }
55f280c9 2467
3d97b40b
TS
2468 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2469 start = start & TARGET_PAGE_MASK;
2470
376a7909
RH
2471 for (addr = start, len = end - start;
2472 len != 0;
2473 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2474 p = page_find(addr >> TARGET_PAGE_BITS);
2475 if( !p )
2476 return -1;
2477 if( !(p->flags & PAGE_VALID) )
2478 return -1;
2479
dae3270c 2480 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2481 return -1;
dae3270c
FB
2482 if (flags & PAGE_WRITE) {
2483 if (!(p->flags & PAGE_WRITE_ORG))
2484 return -1;
2485 /* unprotect the page if it was put read-only because it
2486 contains translated code */
2487 if (!(p->flags & PAGE_WRITE)) {
2488 if (!page_unprotect(addr, 0, NULL))
2489 return -1;
2490 }
2491 return 0;
2492 }
3d97b40b
TS
2493 }
2494 return 0;
2495}
2496
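/*
 * Usage sketch added for illustration (not part of the original file):
 * user-mode emulation of mmap()/mprotect() keeps the guest page flags in
 * sync and validates later accesses; the start/len values are examples.
 *
 *     abi_ulong start = 0x400000, len = 0x4000;
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *     // ... before emulating a write to [start, start+len) ...
 *     if (page_check_range(start, len, PAGE_WRITE) < 0) {
 *         // not writable: deliver SIGSEGV to the guest
 *     }
 */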
9fa3e853 2497/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2498 page. Return TRUE if the fault was successfully handled. */
53a5960a 2499int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2500{
45d679d6
AJ
2501 unsigned int prot;
2502 PageDesc *p;
53a5960a 2503 target_ulong host_start, host_end, addr;
9fa3e853 2504
c8a706fe
PB
2505 /* Technically this isn't safe inside a signal handler. However we
2506 know this only ever happens in a synchronous SEGV handler, so in
2507 practice it seems to be ok. */
2508 mmap_lock();
2509
45d679d6
AJ
2510 p = page_find(address >> TARGET_PAGE_BITS);
2511 if (!p) {
c8a706fe 2512 mmap_unlock();
9fa3e853 2513 return 0;
c8a706fe 2514 }
45d679d6 2515
9fa3e853
FB
2516 /* if the page was really writable, then we change its
2517 protection back to writable */
45d679d6
AJ
2518 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2519 host_start = address & qemu_host_page_mask;
2520 host_end = host_start + qemu_host_page_size;
2521
2522 prot = 0;
2523 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2524 p = page_find(addr >> TARGET_PAGE_BITS);
2525 p->flags |= PAGE_WRITE;
2526 prot |= p->flags;
2527
9fa3e853
FB
2528 /* and since the content will be modified, we must invalidate
2529 the corresponding translated code. */
45d679d6 2530 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2531#ifdef DEBUG_TB_CHECK
45d679d6 2532 tb_invalidate_check(addr);
9fa3e853 2533#endif
9fa3e853 2534 }
45d679d6
AJ
2535 mprotect((void *)g2h(host_start), qemu_host_page_size,
2536 prot & PAGE_BITS);
2537
2538 mmap_unlock();
2539 return 1;
9fa3e853 2540 }
c8a706fe 2541 mmap_unlock();
9fa3e853
FB
2542 return 0;
2543}
2544
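/*
 * Usage sketch added for illustration (not part of the original file): the
 * host SIGSEGV handler of user-mode emulation forwards write faults here;
 * only when page_unprotect() returns 0 is the fault a genuine guest
 * protection error.
 *
 *     if (page_unprotect(h2g(fault_host_addr), pc, puc)) {
 *         return;   // fault was on write-protected translated code, retry
 *     }
 *     // otherwise queue a SIGSEGV for the guest
 */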
9349b4f9 2545static inline void tlb_set_dirty(CPUArchState *env,
6a00d601 2546 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2547{
2548}
9fa3e853
FB
2549#endif /* defined(CONFIG_USER_ONLY) */
2550
e2eef170 2551#if !defined(CONFIG_USER_ONLY)
8da3ff18 2552
c04b2b78
PB
2553#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2554typedef struct subpage_t {
70c68e44 2555 MemoryRegion iomem;
c04b2b78 2556 target_phys_addr_t base;
5312bd8b 2557 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2558} subpage_t;
2559
c227f099 2560static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2561 uint16_t section);
0f0cb164 2562static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2563static void destroy_page_desc(uint16_t section_index)
54688b1e 2564{
5312bd8b
AK
2565 MemoryRegionSection *section = &phys_sections[section_index];
2566 MemoryRegion *mr = section->mr;
54688b1e
AK
2567
2568 if (mr->subpage) {
2569 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2570 memory_region_destroy(&subpage->iomem);
2571 g_free(subpage);
2572 }
2573}
2574
4346ae3e 2575static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2576{
2577 unsigned i;
d6f2ea22 2578 PhysPageEntry *p;
54688b1e 2579
c19e8800 2580 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2581 return;
2582 }
2583
c19e8800 2584 p = phys_map_nodes[lp->ptr];
4346ae3e 2585 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2586 if (!p[i].is_leaf) {
54688b1e 2587 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2588 } else {
c19e8800 2589 destroy_page_desc(p[i].ptr);
54688b1e 2590 }
54688b1e 2591 }
07f07b31 2592 lp->is_leaf = 0;
c19e8800 2593 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2594}
2595
2596static void destroy_all_mappings(void)
2597{
3eef53df 2598 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2599 phys_map_nodes_reset();
54688b1e
AK
2600}
2601
5312bd8b
AK
2602static uint16_t phys_section_add(MemoryRegionSection *section)
2603{
2604 if (phys_sections_nb == phys_sections_nb_alloc) {
2605 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2606 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2607 phys_sections_nb_alloc);
2608 }
2609 phys_sections[phys_sections_nb] = *section;
2610 return phys_sections_nb++;
2611}
2612
2613static void phys_sections_clear(void)
2614{
2615 phys_sections_nb = 0;
2616}
2617
8f2498f9
MT
2618/* register physical memory.
2619 For RAM, 'size' must be a multiple of the target page size.
2620 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2621 io memory page. The address used when calling the IO function is
2622 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2623 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2624 before calculating this offset. This should not be a problem unless
2625 the low bits of start_addr and region_offset differ. */
0f0cb164
AK
2626static void register_subpage(MemoryRegionSection *section)
2627{
2628 subpage_t *subpage;
2629 target_phys_addr_t base = section->offset_within_address_space
2630 & TARGET_PAGE_MASK;
f3705d53 2631 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2632 MemoryRegionSection subsection = {
2633 .offset_within_address_space = base,
2634 .size = TARGET_PAGE_SIZE,
2635 };
0f0cb164
AK
2636 target_phys_addr_t start, end;
2637
f3705d53 2638 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2639
f3705d53 2640 if (!(existing->mr->subpage)) {
0f0cb164
AK
2641 subpage = subpage_init(base);
2642 subsection.mr = &subpage->iomem;
2999097b
AK
2643 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2644 phys_section_add(&subsection));
0f0cb164 2645 } else {
f3705d53 2646 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2647 }
2648 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2649 end = start + section->size;
2650 subpage_register(subpage, start, end, phys_section_add(section));
2651}
2652
2653
2654static void register_multipage(MemoryRegionSection *section)
33417e70 2655{
dd81124b
AK
2656 target_phys_addr_t start_addr = section->offset_within_address_space;
2657 ram_addr_t size = section->size;
2999097b 2658 target_phys_addr_t addr;
5312bd8b 2659 uint16_t section_index = phys_section_add(section);
dd81124b 2660
3b8e6a2d 2661 assert(size);
f6f3fbca 2662
3b8e6a2d 2663 addr = start_addr;
2999097b
AK
2664 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2665 section_index);
33417e70
FB
2666}
2667
0f0cb164
AK
2668void cpu_register_physical_memory_log(MemoryRegionSection *section,
2669 bool readonly)
2670{
2671 MemoryRegionSection now = *section, remain = *section;
2672
2673 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2674 || (now.size < TARGET_PAGE_SIZE)) {
2675 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2676 - now.offset_within_address_space,
2677 now.size);
2678 register_subpage(&now);
2679 remain.size -= now.size;
2680 remain.offset_within_address_space += now.size;
2681 remain.offset_within_region += now.size;
2682 }
2683 now = remain;
2684 now.size &= TARGET_PAGE_MASK;
2685 if (now.size) {
2686 register_multipage(&now);
2687 remain.size -= now.size;
2688 remain.offset_within_address_space += now.size;
2689 remain.offset_within_region += now.size;
2690 }
2691 now = remain;
2692 if (now.size) {
2693 register_subpage(&now);
2694 }
2695}
2696
2697
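/*
 * Worked example added for illustration (not part of the original file):
 * with 4 KB target pages, registering a section at 0x1800 with size 0x2900
 * is split into three pieces: a subpage for [0x1800, 0x2000), full pages
 * for [0x2000, 0x4000) via register_multipage(), and a trailing subpage
 * for [0x4000, 0x4100).
 */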
c227f099 2698void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2699{
2700 if (kvm_enabled())
2701 kvm_coalesce_mmio_region(addr, size);
2702}
2703
c227f099 2704void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2705{
2706 if (kvm_enabled())
2707 kvm_uncoalesce_mmio_region(addr, size);
2708}
2709
62a2744c
SY
2710void qemu_flush_coalesced_mmio_buffer(void)
2711{
2712 if (kvm_enabled())
2713 kvm_flush_coalesced_mmio_buffer();
2714}
2715
c902760f
MT
2716#if defined(__linux__) && !defined(TARGET_S390X)
2717
2718#include <sys/vfs.h>
2719
2720#define HUGETLBFS_MAGIC 0x958458f6
2721
2722static long gethugepagesize(const char *path)
2723{
2724 struct statfs fs;
2725 int ret;
2726
2727 do {
9742bf26 2728 ret = statfs(path, &fs);
c902760f
MT
2729 } while (ret != 0 && errno == EINTR);
2730
2731 if (ret != 0) {
9742bf26
YT
2732 perror(path);
2733 return 0;
c902760f
MT
2734 }
2735
2736 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2737 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2738
2739 return fs.f_bsize;
2740}
2741
04b16653
AW
2742static void *file_ram_alloc(RAMBlock *block,
2743 ram_addr_t memory,
2744 const char *path)
c902760f
MT
2745{
2746 char *filename;
2747 void *area;
2748 int fd;
2749#ifdef MAP_POPULATE
2750 int flags;
2751#endif
2752 unsigned long hpagesize;
2753
2754 hpagesize = gethugepagesize(path);
2755 if (!hpagesize) {
9742bf26 2756 return NULL;
c902760f
MT
2757 }
2758
2759 if (memory < hpagesize) {
2760 return NULL;
2761 }
2762
2763 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2764 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2765 return NULL;
2766 }
2767
2768 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2769 return NULL;
c902760f
MT
2770 }
2771
2772 fd = mkstemp(filename);
2773 if (fd < 0) {
9742bf26
YT
2774 perror("unable to create backing store for hugepages");
2775 free(filename);
2776 return NULL;
c902760f
MT
2777 }
2778 unlink(filename);
2779 free(filename);
2780
2781 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2782
2783 /*
2784 * ftruncate is not supported by hugetlbfs in older
2785 * hosts, so don't bother bailing out on errors.
2786 * If anything goes wrong with it under other filesystems,
2787 * mmap will fail.
2788 */
2789 if (ftruncate(fd, memory))
9742bf26 2790 perror("ftruncate");
c902760f
MT
2791
2792#ifdef MAP_POPULATE
2793 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2794 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2795 * to sidestep this quirk.
2796 */
2797 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2798 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2799#else
2800 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2801#endif
2802 if (area == MAP_FAILED) {
9742bf26
YT
2803 perror("file_ram_alloc: can't mmap RAM pages");
2804 close(fd);
2805 return (NULL);
c902760f 2806 }
04b16653 2807 block->fd = fd;
c902760f
MT
2808 return area;
2809}
2810#endif
2811
d17b5288 2812static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2813{
2814 RAMBlock *block, *next_block;
3e837b2c 2815 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2816
2817 if (QLIST_EMPTY(&ram_list.blocks))
2818 return 0;
2819
2820 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2821 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2822
2823 end = block->offset + block->length;
2824
2825 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2826 if (next_block->offset >= end) {
2827 next = MIN(next, next_block->offset);
2828 }
2829 }
2830 if (next - end >= size && next - end < mingap) {
3e837b2c 2831 offset = end;
04b16653
AW
2832 mingap = next - end;
2833 }
2834 }
3e837b2c
AW
2835
2836 if (offset == RAM_ADDR_MAX) {
2837 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2838 (uint64_t)size);
2839 abort();
2840 }
2841
04b16653
AW
2842 return offset;
2843}
2844
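/*
 * Worked example added for illustration (not part of the original file):
 * with existing blocks [0, 0x8000000) and [0xc000000, 0x10000000), a
 * request for 0x2000000 bytes sees two gaps: 0x4000000 bytes starting at
 * 0x8000000 and an unbounded one starting at 0x10000000.  The smaller gap
 * that still fits wins, so the new block is placed at offset 0x8000000.
 */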
2845static ram_addr_t last_ram_offset(void)
d17b5288
AW
2846{
2847 RAMBlock *block;
2848 ram_addr_t last = 0;
2849
2850 QLIST_FOREACH(block, &ram_list.blocks, next)
2851 last = MAX(last, block->offset + block->length);
2852
2853 return last;
2854}
2855
c5705a77 2856void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2857{
2858 RAMBlock *new_block, *block;
2859
c5705a77
AK
2860 new_block = NULL;
2861 QLIST_FOREACH(block, &ram_list.blocks, next) {
2862 if (block->offset == addr) {
2863 new_block = block;
2864 break;
2865 }
2866 }
2867 assert(new_block);
2868 assert(!new_block->idstr[0]);
84b89d78
CM
2869
2870 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2871 char *id = dev->parent_bus->info->get_dev_path(dev);
2872 if (id) {
2873 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2874 g_free(id);
84b89d78
CM
2875 }
2876 }
2877 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2878
2879 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2880 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2881 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2882 new_block->idstr);
2883 abort();
2884 }
2885 }
c5705a77
AK
2886}
2887
2888ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2889 MemoryRegion *mr)
2890{
2891 RAMBlock *new_block;
2892
2893 size = TARGET_PAGE_ALIGN(size);
2894 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2895
7c637366 2896 new_block->mr = mr;
432d268c 2897 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2898 if (host) {
2899 new_block->host = host;
cd19cfa2 2900 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2901 } else {
2902 if (mem_path) {
c902760f 2903#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2904 new_block->host = file_ram_alloc(new_block, size, mem_path);
2905 if (!new_block->host) {
2906 new_block->host = qemu_vmalloc(size);
e78815a5 2907 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2908 }
c902760f 2909#else
6977dfe6
YT
2910 fprintf(stderr, "-mem-path option unsupported\n");
2911 exit(1);
c902760f 2912#endif
6977dfe6 2913 } else {
6b02494d 2914#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2915 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2916 a system-defined value, which is at least 256GB. Larger systems
2917 have larger values. We put the guest between the end of data
2918 segment (system break) and this value. We use 32GB as a base to
2919 have enough room for the system break to grow. */
2920 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2921 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2922 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2923 if (new_block->host == MAP_FAILED) {
2924 fprintf(stderr, "Allocating RAM failed\n");
2925 abort();
2926 }
6b02494d 2927#else
868bb33f 2928 if (xen_enabled()) {
fce537d4 2929 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2930 } else {
2931 new_block->host = qemu_vmalloc(size);
2932 }
6b02494d 2933#endif
e78815a5 2934 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2935 }
c902760f 2936 }
94a6b54f
PB
2937 new_block->length = size;
2938
f471a17e 2939 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2940
7267c094 2941 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2942 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2943 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2944 0xff, size >> TARGET_PAGE_BITS);
2945
6f0437e8
JK
2946 if (kvm_enabled())
2947 kvm_setup_guest_memory(new_block->host, size);
2948
94a6b54f
PB
2949 return new_block->offset;
2950}
e9a1ab19 2951
c5705a77 2952ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2953{
c5705a77 2954 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2955}
2956
1f2e98b6
AW
2957void qemu_ram_free_from_ptr(ram_addr_t addr)
2958{
2959 RAMBlock *block;
2960
2961 QLIST_FOREACH(block, &ram_list.blocks, next) {
2962 if (addr == block->offset) {
2963 QLIST_REMOVE(block, next);
7267c094 2964 g_free(block);
1f2e98b6
AW
2965 return;
2966 }
2967 }
2968}
2969
c227f099 2970void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2971{
04b16653
AW
2972 RAMBlock *block;
2973
2974 QLIST_FOREACH(block, &ram_list.blocks, next) {
2975 if (addr == block->offset) {
2976 QLIST_REMOVE(block, next);
cd19cfa2
HY
2977 if (block->flags & RAM_PREALLOC_MASK) {
2978 ;
2979 } else if (mem_path) {
04b16653
AW
2980#if defined (__linux__) && !defined(TARGET_S390X)
2981 if (block->fd) {
2982 munmap(block->host, block->length);
2983 close(block->fd);
2984 } else {
2985 qemu_vfree(block->host);
2986 }
fd28aa13
JK
2987#else
2988 abort();
04b16653
AW
2989#endif
2990 } else {
2991#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2992 munmap(block->host, block->length);
2993#else
868bb33f 2994 if (xen_enabled()) {
e41d7c69 2995 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2996 } else {
2997 qemu_vfree(block->host);
2998 }
04b16653
AW
2999#endif
3000 }
7267c094 3001 g_free(block);
04b16653
AW
3002 return;
3003 }
3004 }
3005
e9a1ab19
FB
3006}
3007
cd19cfa2
HY
3008#ifndef _WIN32
3009void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3010{
3011 RAMBlock *block;
3012 ram_addr_t offset;
3013 int flags;
3014 void *area, *vaddr;
3015
3016 QLIST_FOREACH(block, &ram_list.blocks, next) {
3017 offset = addr - block->offset;
3018 if (offset < block->length) {
3019 vaddr = block->host + offset;
3020 if (block->flags & RAM_PREALLOC_MASK) {
3021 ;
3022 } else {
3023 flags = MAP_FIXED;
3024 munmap(vaddr, length);
3025 if (mem_path) {
3026#if defined(__linux__) && !defined(TARGET_S390X)
3027 if (block->fd) {
3028#ifdef MAP_POPULATE
3029 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3030 MAP_PRIVATE;
3031#else
3032 flags |= MAP_PRIVATE;
3033#endif
3034 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3035 flags, block->fd, offset);
3036 } else {
3037 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3038 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3039 flags, -1, 0);
3040 }
fd28aa13
JK
3041#else
3042 abort();
cd19cfa2
HY
3043#endif
3044 } else {
3045#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3046 flags |= MAP_SHARED | MAP_ANONYMOUS;
3047 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3048 flags, -1, 0);
3049#else
3050 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3051 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3052 flags, -1, 0);
3053#endif
3054 }
3055 if (area != vaddr) {
f15fbc4b
AP
3056 fprintf(stderr, "Could not remap addr: "
3057 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3058 length, addr);
3059 exit(1);
3060 }
3061 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3062 }
3063 return;
3064 }
3065 }
3066}
3067#endif /* !_WIN32 */
3068
dc828ca1 3069/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3070 With the exception of the softmmu code in this file, this should
3071 only be used for local memory (e.g. video ram) that the device owns,
3072 and knows it isn't going to access beyond the end of the block.
3073
3074 It should not be used for general purpose DMA.
3075 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3076 */
c227f099 3077void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3078{
94a6b54f
PB
3079 RAMBlock *block;
3080
f471a17e
AW
3081 QLIST_FOREACH(block, &ram_list.blocks, next) {
3082 if (addr - block->offset < block->length) {
7d82af38
VP
3083 /* Move this entry to the start of the list. */
3084 if (block != QLIST_FIRST(&ram_list.blocks)) {
3085 QLIST_REMOVE(block, next);
3086 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3087 }
868bb33f 3088 if (xen_enabled()) {
432d268c
JN
3089 /* We need to check if the requested address is in the RAM
3090 * because we don't want to map the entire memory in QEMU.
712c2b41 3091 * In that case just map until the end of the page.
432d268c
JN
3092 */
3093 if (block->offset == 0) {
e41d7c69 3094 return xen_map_cache(addr, 0, 0);
432d268c 3095 } else if (block->host == NULL) {
e41d7c69
JK
3096 block->host =
3097 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3098 }
3099 }
f471a17e
AW
3100 return block->host + (addr - block->offset);
3101 }
94a6b54f 3102 }
f471a17e
AW
3103
3104 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3105 abort();
3106
3107 return NULL;
dc828ca1
PB
3108}
3109
b2e0a138
MT
3110/* Return a host pointer to ram allocated with qemu_ram_alloc.
3111 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3112 */
3113void *qemu_safe_ram_ptr(ram_addr_t addr)
3114{
3115 RAMBlock *block;
3116
3117 QLIST_FOREACH(block, &ram_list.blocks, next) {
3118 if (addr - block->offset < block->length) {
868bb33f 3119 if (xen_enabled()) {
432d268c
JN
3120 /* We need to check if the requested address is in the RAM
3121 * because we don't want to map the entire memory in QEMU.
712c2b41 3122 * In that case just map until the end of the page.
432d268c
JN
3123 */
3124 if (block->offset == 0) {
e41d7c69 3125 return xen_map_cache(addr, 0, 0);
432d268c 3126 } else if (block->host == NULL) {
e41d7c69
JK
3127 block->host =
3128 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3129 }
3130 }
b2e0a138
MT
3131 return block->host + (addr - block->offset);
3132 }
3133 }
3134
3135 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3136 abort();
3137
3138 return NULL;
3139}
3140
38bee5dc
SS
3141/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3142 * but takes a size argument */
8ab934f9 3143void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3144{
8ab934f9
SS
3145 if (*size == 0) {
3146 return NULL;
3147 }
868bb33f 3148 if (xen_enabled()) {
e41d7c69 3149 return xen_map_cache(addr, *size, 1);
868bb33f 3150 } else {
38bee5dc
SS
3151 RAMBlock *block;
3152
3153 QLIST_FOREACH(block, &ram_list.blocks, next) {
3154 if (addr - block->offset < block->length) {
3155 if (addr - block->offset + *size > block->length)
3156 *size = block->length - addr + block->offset;
3157 return block->host + (addr - block->offset);
3158 }
3159 }
3160
3161 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3162 abort();
38bee5dc
SS
3163 }
3164}
3165
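/*
 * Usage sketch added for illustration (not part of the original file):
 * callers that can cope with a shorter mapping pass the length they would
 * like and check what they actually got back; the values are examples.
 *
 *     ram_addr_t len = 0x10000;
 *     void *p = qemu_ram_ptr_length(addr, &len);
 *     // len may now be smaller than requested if addr is close to the
 *     // end of its RAM block.
 */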
050a0ddf
AP
3166void qemu_put_ram_ptr(void *addr)
3167{
3168 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3169}
3170
e890261f 3171int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3172{
94a6b54f
PB
3173 RAMBlock *block;
3174 uint8_t *host = ptr;
3175
868bb33f 3176 if (xen_enabled()) {
e41d7c69 3177 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3178 return 0;
3179 }
3180
f471a17e 3181 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3182 /* This can happen when the block is not mapped. */
3183 if (block->host == NULL) {
3184 continue;
3185 }
f471a17e 3186 if (host - block->host < block->length) {
e890261f
MT
3187 *ram_addr = block->offset + (host - block->host);
3188 return 0;
f471a17e 3189 }
94a6b54f 3190 }
432d268c 3191
e890261f
MT
3192 return -1;
3193}
f471a17e 3194
e890261f
MT
3195/* Some of the softmmu routines need to translate from a host pointer
3196 (typically a TLB entry) back to a ram offset. */
3197ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3198{
3199 ram_addr_t ram_addr;
f471a17e 3200
e890261f
MT
3201 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3202 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3203 abort();
3204 }
3205 return ram_addr;
5579c7f3
PB
3206}
3207
0e0df1e2
AK
3208static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3209 unsigned size)
e18231a3
BS
3210{
3211#ifdef DEBUG_UNASSIGNED
3212 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3213#endif
5b450407 3214#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3215 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3216#endif
3217 return 0;
3218}
3219
0e0df1e2
AK
3220static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3221 uint64_t val, unsigned size)
e18231a3
BS
3222{
3223#ifdef DEBUG_UNASSIGNED
0e0df1e2 3224 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3225#endif
5b450407 3226#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3227 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3228#endif
33417e70
FB
3229}
3230
0e0df1e2
AK
3231static const MemoryRegionOps unassigned_mem_ops = {
3232 .read = unassigned_mem_read,
3233 .write = unassigned_mem_write,
3234 .endianness = DEVICE_NATIVE_ENDIAN,
3235};
e18231a3 3236
0e0df1e2
AK
3237static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3238 unsigned size)
e18231a3 3239{
0e0df1e2 3240 abort();
e18231a3
BS
3241}
3242
0e0df1e2
AK
3243static void error_mem_write(void *opaque, target_phys_addr_t addr,
3244 uint64_t value, unsigned size)
e18231a3 3245{
0e0df1e2 3246 abort();
33417e70
FB
3247}
3248
0e0df1e2
AK
3249static const MemoryRegionOps error_mem_ops = {
3250 .read = error_mem_read,
3251 .write = error_mem_write,
3252 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3253};
3254
0e0df1e2
AK
3255static const MemoryRegionOps rom_mem_ops = {
3256 .read = error_mem_read,
3257 .write = unassigned_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3259};
3260
0e0df1e2
AK
3261static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3262 uint64_t val, unsigned size)
9fa3e853 3263{
3a7d929e 3264 int dirty_flags;
f7c11b53 3265 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3266 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3267#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3268 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3269 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3270#endif
3a7d929e 3271 }
0e0df1e2
AK
3272 switch (size) {
3273 case 1:
3274 stb_p(qemu_get_ram_ptr(ram_addr), val);
3275 break;
3276 case 2:
3277 stw_p(qemu_get_ram_ptr(ram_addr), val);
3278 break;
3279 case 4:
3280 stl_p(qemu_get_ram_ptr(ram_addr), val);
3281 break;
3282 default:
3283 abort();
3a7d929e 3284 }
f23db169 3285 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3286 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3287 /* we remove the notdirty callback only if the code has been
3288 flushed */
3289 if (dirty_flags == 0xff)
2e70f6ef 3290 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3291}
3292
0e0df1e2
AK
3293static const MemoryRegionOps notdirty_mem_ops = {
3294 .read = error_mem_read,
3295 .write = notdirty_mem_write,
3296 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3297};
3298
0f459d16 3299/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3300static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3301{
9349b4f9 3302 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3303 target_ulong pc, cs_base;
3304 TranslationBlock *tb;
0f459d16 3305 target_ulong vaddr;
a1d1bb31 3306 CPUWatchpoint *wp;
06d55cc1 3307 int cpu_flags;
0f459d16 3308
06d55cc1
AL
3309 if (env->watchpoint_hit) {
3310 /* We re-entered the check after replacing the TB. Now raise
3311 * the debug interrupt so that it will trigger after the
3312 * current instruction. */
3313 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3314 return;
3315 }
2e70f6ef 3316 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3317 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3318 if ((vaddr == (wp->vaddr & len_mask) ||
3319 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3320 wp->flags |= BP_WATCHPOINT_HIT;
3321 if (!env->watchpoint_hit) {
3322 env->watchpoint_hit = wp;
3323 tb = tb_find_pc(env->mem_io_pc);
3324 if (!tb) {
3325 cpu_abort(env, "check_watchpoint: could not find TB for "
3326 "pc=%p", (void *)env->mem_io_pc);
3327 }
618ba8e6 3328 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3329 tb_phys_invalidate(tb, -1);
3330 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3331 env->exception_index = EXCP_DEBUG;
488d6577 3332 cpu_loop_exit(env);
6e140f28
AL
3333 } else {
3334 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3335 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3336 cpu_resume_from_signal(env, NULL);
6e140f28 3337 }
06d55cc1 3338 }
6e140f28
AL
3339 } else {
3340 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3341 }
3342 }
3343}
3344
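/*
 * Usage sketch added for illustration (not part of the original file): the
 * check above only runs for pages that were routed through the watchpoint
 * I/O path when a watchpoint was registered, e.g.:
 *
 *     CPUWatchpoint *wp;
 *     cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *     // guest stores to that page now reach watch_mem_write() below,
 *     // which calls check_watchpoint() before performing the access.
 */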
6658ffb8
PB
3345/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3346 so these check for a hit then pass through to the normal out-of-line
3347 phys routines. */
1ec9b909
AK
3348static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3349 unsigned size)
6658ffb8 3350{
1ec9b909
AK
3351 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3352 switch (size) {
3353 case 1: return ldub_phys(addr);
3354 case 2: return lduw_phys(addr);
3355 case 4: return ldl_phys(addr);
3356 default: abort();
3357 }
6658ffb8
PB
3358}
3359
1ec9b909
AK
3360static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3361 uint64_t val, unsigned size)
6658ffb8 3362{
1ec9b909
AK
3363 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3364 switch (size) {
67364150
MF
3365 case 1:
3366 stb_phys(addr, val);
3367 break;
3368 case 2:
3369 stw_phys(addr, val);
3370 break;
3371 case 4:
3372 stl_phys(addr, val);
3373 break;
1ec9b909
AK
3374 default: abort();
3375 }
6658ffb8
PB
3376}
3377
1ec9b909
AK
3378static const MemoryRegionOps watch_mem_ops = {
3379 .read = watch_mem_read,
3380 .write = watch_mem_write,
3381 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3382};
6658ffb8 3383
70c68e44
AK
3384static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3385 unsigned len)
db7b5426 3386{
70c68e44 3387 subpage_t *mmio = opaque;
f6405247 3388 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3389 MemoryRegionSection *section;
db7b5426
BS
3390#if defined(DEBUG_SUBPAGE)
3391 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3392 mmio, len, addr, idx);
3393#endif
db7b5426 3394
5312bd8b
AK
3395 section = &phys_sections[mmio->sub_section[idx]];
3396 addr += mmio->base;
3397 addr -= section->offset_within_address_space;
3398 addr += section->offset_within_region;
37ec01d4 3399 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3400}
3401
70c68e44
AK
3402static void subpage_write(void *opaque, target_phys_addr_t addr,
3403 uint64_t value, unsigned len)
db7b5426 3404{
70c68e44 3405 subpage_t *mmio = opaque;
f6405247 3406 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3407 MemoryRegionSection *section;
db7b5426 3408#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3409 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3410 " idx %d value %"PRIx64"\n",
f6405247 3411 __func__, mmio, len, addr, idx, value);
db7b5426 3412#endif
f6405247 3413
5312bd8b
AK
3414 section = &phys_sections[mmio->sub_section[idx]];
3415 addr += mmio->base;
3416 addr -= section->offset_within_address_space;
3417 addr += section->offset_within_region;
37ec01d4 3418 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3419}
3420
70c68e44
AK
3421static const MemoryRegionOps subpage_ops = {
3422 .read = subpage_read,
3423 .write = subpage_write,
3424 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3425};
3426
de712f94
AK
3427static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3428 unsigned size)
56384e8b
AF
3429{
3430 ram_addr_t raddr = addr;
3431 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3432 switch (size) {
3433 case 1: return ldub_p(ptr);
3434 case 2: return lduw_p(ptr);
3435 case 4: return ldl_p(ptr);
3436 default: abort();
3437 }
56384e8b
AF
3438}
3439
de712f94
AK
3440static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3441 uint64_t value, unsigned size)
56384e8b
AF
3442{
3443 ram_addr_t raddr = addr;
3444 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3445 switch (size) {
3446 case 1: return stb_p(ptr, value);
3447 case 2: return stw_p(ptr, value);
3448 case 4: return stl_p(ptr, value);
3449 default: abort();
3450 }
56384e8b
AF
3451}
3452
de712f94
AK
3453static const MemoryRegionOps subpage_ram_ops = {
3454 .read = subpage_ram_read,
3455 .write = subpage_ram_write,
3456 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3457};
3458
c227f099 3459static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3460 uint16_t section)
db7b5426
BS
3461{
3462 int idx, eidx;
3463
3464 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3465 return -1;
3466 idx = SUBPAGE_IDX(start);
3467 eidx = SUBPAGE_IDX(end);
3468#if defined(DEBUG_SUBPAGE)
0bf9e31a 3469 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3470 mmio, start, end, idx, eidx, section);
3471#endif
5312bd8b
AK
3472 if (memory_region_is_ram(phys_sections[section].mr)) {
3473 MemoryRegionSection new_section = phys_sections[section];
3474 new_section.mr = &io_mem_subpage_ram;
3475 section = phys_section_add(&new_section);
56384e8b 3476 }
db7b5426 3477 for (; idx <= eidx; idx++) {
5312bd8b 3478 mmio->sub_section[idx] = section;
db7b5426
BS
3479 }
3480
3481 return 0;
3482}
3483
0f0cb164 3484static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3485{
c227f099 3486 subpage_t *mmio;
db7b5426 3487
7267c094 3488 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3489
3490 mmio->base = base;
70c68e44
AK
3491 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3492 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3493 mmio->iomem.subpage = true;
db7b5426 3494#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3495 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3496 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3497#endif
0f0cb164 3498 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3499
3500 return mmio;
3501}
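/* A minimal sketch of how the subpage machinery above is used: carve a small
 * I/O window out of a single target page (a 4 KiB TARGET_PAGE_SIZE and the
 * base address/section index are assumptions for illustration).  Accesses to
 * offsets 0x400..0x4ff of the page reach the given section; the rest of the
 * page keeps the phys_section_unassigned entry set up by subpage_init(). */
#if 0   /* example only */
static void example_subpage_window(uint16_t dev_section)
{
    subpage_t *sp = subpage_init(0x10000000);   /* page-aligned base */

    subpage_register(sp, 0x400, 0x4ff, dev_section);
}
#endif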
3502
5312bd8b
AK
3503static uint16_t dummy_section(MemoryRegion *mr)
3504{
3505 MemoryRegionSection section = {
3506 .mr = mr,
3507 .offset_within_address_space = 0,
3508 .offset_within_region = 0,
3509 .size = UINT64_MAX,
3510 };
3511
3512 return phys_section_add(&section);
3513}
3514
37ec01d4 3515MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3516{
37ec01d4 3517 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3518}
3519
e9179ce1
AK
3520static void io_mem_init(void)
3521{
0e0df1e2 3522 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3523 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3524 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3525 "unassigned", UINT64_MAX);
3526 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3527 "notdirty", UINT64_MAX);
de712f94
AK
3528 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3529 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3530 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3531 "watch", UINT64_MAX);
e9179ce1
AK
3532}
3533
50c1e149
AK
3534static void core_begin(MemoryListener *listener)
3535{
54688b1e 3536 destroy_all_mappings();
5312bd8b 3537 phys_sections_clear();
c19e8800 3538 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3539 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3540 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3541 phys_section_rom = dummy_section(&io_mem_rom);
3542 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3543}
3544
3545static void core_commit(MemoryListener *listener)
3546{
9349b4f9 3547 CPUArchState *env;
117712c3
AK
3548
3549 /* since each CPU stores ram addresses in its TLB cache, we must
3550 reset the modified entries */
3551 /* XXX: slow ! */
3552 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3553 tlb_flush(env, 1);
3554 }
50c1e149
AK
3555}
3556
93632747
AK
3557static void core_region_add(MemoryListener *listener,
3558 MemoryRegionSection *section)
3559{
4855d41a 3560 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3561}
3562
3563static void core_region_del(MemoryListener *listener,
3564 MemoryRegionSection *section)
3565{
93632747
AK
3566}
3567
50c1e149
AK
3568static void core_region_nop(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570{
54688b1e 3571 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3572}
3573
93632747
AK
3574static void core_log_start(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
3577}
3578
3579static void core_log_stop(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581{
3582}
3583
3584static void core_log_sync(MemoryListener *listener,
3585 MemoryRegionSection *section)
3586{
3587}
3588
3589static void core_log_global_start(MemoryListener *listener)
3590{
3591 cpu_physical_memory_set_dirty_tracking(1);
3592}
3593
3594static void core_log_global_stop(MemoryListener *listener)
3595{
3596 cpu_physical_memory_set_dirty_tracking(0);
3597}
3598
3599static void core_eventfd_add(MemoryListener *listener,
3600 MemoryRegionSection *section,
3601 bool match_data, uint64_t data, int fd)
3602{
3603}
3604
3605static void core_eventfd_del(MemoryListener *listener,
3606 MemoryRegionSection *section,
3607 bool match_data, uint64_t data, int fd)
3608{
3609}
3610
50c1e149
AK
3611static void io_begin(MemoryListener *listener)
3612{
3613}
3614
3615static void io_commit(MemoryListener *listener)
3616{
3617}
3618
4855d41a
AK
3619static void io_region_add(MemoryListener *listener,
3620 MemoryRegionSection *section)
3621{
a2d33521
AK
3622 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3623
3624 mrio->mr = section->mr;
3625 mrio->offset = section->offset_within_region;
3626 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3627 section->offset_within_address_space, section->size);
a2d33521 3628 ioport_register(&mrio->iorange);
4855d41a
AK
3629}
3630
3631static void io_region_del(MemoryListener *listener,
3632 MemoryRegionSection *section)
3633{
3634 isa_unassign_ioport(section->offset_within_address_space, section->size);
3635}
3636
50c1e149
AK
3637static void io_region_nop(MemoryListener *listener,
3638 MemoryRegionSection *section)
3639{
3640}
3641
4855d41a
AK
3642static void io_log_start(MemoryListener *listener,
3643 MemoryRegionSection *section)
3644{
3645}
3646
3647static void io_log_stop(MemoryListener *listener,
3648 MemoryRegionSection *section)
3649{
3650}
3651
3652static void io_log_sync(MemoryListener *listener,
3653 MemoryRegionSection *section)
3654{
3655}
3656
3657static void io_log_global_start(MemoryListener *listener)
3658{
3659}
3660
3661static void io_log_global_stop(MemoryListener *listener)
3662{
3663}
3664
3665static void io_eventfd_add(MemoryListener *listener,
3666 MemoryRegionSection *section,
3667 bool match_data, uint64_t data, int fd)
3668{
3669}
3670
3671static void io_eventfd_del(MemoryListener *listener,
3672 MemoryRegionSection *section,
3673 bool match_data, uint64_t data, int fd)
3674{
3675}
3676
93632747 3677static MemoryListener core_memory_listener = {
50c1e149
AK
3678 .begin = core_begin,
3679 .commit = core_commit,
93632747
AK
3680 .region_add = core_region_add,
3681 .region_del = core_region_del,
50c1e149 3682 .region_nop = core_region_nop,
93632747
AK
3683 .log_start = core_log_start,
3684 .log_stop = core_log_stop,
3685 .log_sync = core_log_sync,
3686 .log_global_start = core_log_global_start,
3687 .log_global_stop = core_log_global_stop,
3688 .eventfd_add = core_eventfd_add,
3689 .eventfd_del = core_eventfd_del,
3690 .priority = 0,
3691};
3692
4855d41a 3693static MemoryListener io_memory_listener = {
50c1e149
AK
3694 .begin = io_begin,
3695 .commit = io_commit,
4855d41a
AK
3696 .region_add = io_region_add,
3697 .region_del = io_region_del,
50c1e149 3698 .region_nop = io_region_nop,
4855d41a
AK
3699 .log_start = io_log_start,
3700 .log_stop = io_log_stop,
3701 .log_sync = io_log_sync,
3702 .log_global_start = io_log_global_start,
3703 .log_global_stop = io_log_global_stop,
3704 .eventfd_add = io_eventfd_add,
3705 .eventfd_del = io_eventfd_del,
3706 .priority = 0,
3707};
3708
62152b8a
AK
3709static void memory_map_init(void)
3710{
7267c094 3711 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3712 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3713 set_system_memory_map(system_memory);
309cb471 3714
7267c094 3715 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3716 memory_region_init(system_io, "io", 65536);
3717 set_system_io_map(system_io);
93632747 3718
4855d41a
AK
3719 memory_listener_register(&core_memory_listener, system_memory);
3720 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3721}
3722
3723MemoryRegion *get_system_memory(void)
3724{
3725 return system_memory;
3726}
3727
309cb471
AK
3728MemoryRegion *get_system_io(void)
3729{
3730 return system_io;
3731}
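/* A sketch of how board/device code typically plugs into the address spaces
 * created by memory_map_init(): build a MemoryRegion with its own
 * MemoryRegionOps and attach it under get_system_memory().  The device
 * struct, callbacks, size and base address are hypothetical, and
 * memory_region_add_subregion() is assumed to take (parent, offset, child). */
#if 0   /* example only */
typedef struct ExampleDev {
    MemoryRegion iomem;
} ExampleDev;

static uint64_t example_dev_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    return 0;           /* registers read as zero in this sketch */
}

static void example_dev_write(void *opaque, target_phys_addr_t addr,
                              uint64_t val, unsigned size)
{
    /* writes are ignored in this sketch */
}

static const MemoryRegionOps example_dev_ops = {
    .read = example_dev_read,
    .write = example_dev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void example_dev_map(ExampleDev *d)
{
    memory_region_init_io(&d->iomem, &example_dev_ops, d,
                          "example-dev", 0x1000);
    memory_region_add_subregion(get_system_memory(), 0xfe000000, &d->iomem);
}
#endif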
3732
e2eef170
PB
3733#endif /* !defined(CONFIG_USER_ONLY) */
3734
13eb76e0
FB
3735/* physical memory access (slow version, mainly for debug) */
3736#if defined(CONFIG_USER_ONLY)
9349b4f9 3737int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3738 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3739{
3740 int l, flags;
3741 target_ulong page;
53a5960a 3742 void * p;
13eb76e0
FB
3743
3744 while (len > 0) {
3745 page = addr & TARGET_PAGE_MASK;
3746 l = (page + TARGET_PAGE_SIZE) - addr;
3747 if (l > len)
3748 l = len;
3749 flags = page_get_flags(page);
3750 if (!(flags & PAGE_VALID))
a68fe89c 3751 return -1;
13eb76e0
FB
3752 if (is_write) {
3753 if (!(flags & PAGE_WRITE))
a68fe89c 3754 return -1;
579a97f7 3755 /* XXX: this code should not depend on lock_user */
72fb7daa 3756 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3757 return -1;
72fb7daa
AJ
3758 memcpy(p, buf, l);
3759 unlock_user(p, addr, l);
13eb76e0
FB
3760 } else {
3761 if (!(flags & PAGE_READ))
a68fe89c 3762 return -1;
579a97f7 3763 /* XXX: this code should not depend on lock_user */
72fb7daa 3764 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3765 return -1;
72fb7daa 3766 memcpy(buf, p, l);
5b257578 3767 unlock_user(p, addr, 0);
13eb76e0
FB
3768 }
3769 len -= l;
3770 buf += l;
3771 addr += l;
3772 }
a68fe89c 3773 return 0;
13eb76e0 3774}
8df1cd07 3775
13eb76e0 3776#else
c227f099 3777void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3778 int len, int is_write)
3779{
37ec01d4 3780 int l;
13eb76e0
FB
3781 uint8_t *ptr;
3782 uint32_t val;
c227f099 3783 target_phys_addr_t page;
f3705d53 3784 MemoryRegionSection *section;
3b46e624 3785
13eb76e0
FB
3786 while (len > 0) {
3787 page = addr & TARGET_PAGE_MASK;
3788 l = (page + TARGET_PAGE_SIZE) - addr;
3789 if (l > len)
3790 l = len;
06ef3525 3791 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3792
13eb76e0 3793 if (is_write) {
f3705d53 3794 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3795 target_phys_addr_t addr1;
f3705d53 3796 addr1 = section_addr(section, addr);
6a00d601
FB
3797 /* XXX: could force cpu_single_env to NULL to avoid
3798 potential bugs */
6c2934db 3799 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3800 /* 32 bit write access */
c27004ec 3801 val = ldl_p(buf);
37ec01d4 3802 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3803 l = 4;
6c2934db 3804 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3805 /* 16 bit write access */
c27004ec 3806 val = lduw_p(buf);
37ec01d4 3807 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3808 l = 2;
3809 } else {
1c213d19 3810 /* 8 bit write access */
c27004ec 3811 val = ldub_p(buf);
37ec01d4 3812 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3813 l = 1;
3814 }
f3705d53 3815 } else if (!section->readonly) {
8ca5692d 3816 ram_addr_t addr1;
f3705d53
AK
3817 addr1 = memory_region_get_ram_addr(section->mr)
3818 + section_addr(section, addr);
13eb76e0 3819 /* RAM case */
5579c7f3 3820 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3821 memcpy(ptr, buf, l);
3a7d929e
FB
3822 if (!cpu_physical_memory_is_dirty(addr1)) {
3823 /* invalidate code */
3824 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3825 /* set dirty bit */
f7c11b53
YT
3826 cpu_physical_memory_set_dirty_flags(
3827 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3828 }
050a0ddf 3829 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3830 }
3831 } else {
f3705d53 3832 if (!is_ram_rom_romd(section)) {
f1f6e3b8 3833 target_phys_addr_t addr1;
13eb76e0 3834 /* I/O case */
f3705d53 3835 addr1 = section_addr(section, addr);
6c2934db 3836 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3837 /* 32 bit read access */
37ec01d4 3838 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3839 stl_p(buf, val);
13eb76e0 3840 l = 4;
6c2934db 3841 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3842 /* 16 bit read access */
37ec01d4 3843 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3844 stw_p(buf, val);
13eb76e0
FB
3845 l = 2;
3846 } else {
1c213d19 3847 /* 8 bit read access */
37ec01d4 3848 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3849 stb_p(buf, val);
13eb76e0
FB
3850 l = 1;
3851 }
3852 } else {
3853 /* RAM case */
f3705d53
AK
3854 ptr = qemu_get_ram_ptr(section->mr->ram_addr)
3855 + section_addr(section, addr);
3856 memcpy(buf, ptr, l);
050a0ddf 3857 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3858 }
3859 }
3860 len -= l;
3861 buf += l;
3862 addr += l;
3863 }
3864}
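/* The cpu_physical_memory_read()/cpu_physical_memory_write() wrappers (used
 * further down in this file) funnel into cpu_physical_memory_rw() above; the
 * write side also invalidates any translated code in the touched RAM and
 * marks it dirty.  A sketch with a hypothetical guest-physical address: */
#if 0   /* example only */
static void example_patch_guest_word(void)
{
    uint32_t v;

    cpu_physical_memory_read(0x1000, &v, sizeof(v));
    v |= 1;
    cpu_physical_memory_write(0x1000, &v, sizeof(v));
}
#endif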
8df1cd07 3865
d0ecd2aa 3866/* used for ROM loading : can write in RAM and ROM */
c227f099 3867void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3868 const uint8_t *buf, int len)
3869{
3870 int l;
3871 uint8_t *ptr;
c227f099 3872 target_phys_addr_t page;
f3705d53 3873 MemoryRegionSection *section;
3b46e624 3874
d0ecd2aa
FB
3875 while (len > 0) {
3876 page = addr & TARGET_PAGE_MASK;
3877 l = (page + TARGET_PAGE_SIZE) - addr;
3878 if (l > len)
3879 l = len;
06ef3525 3880 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3881
f3705d53 3882 if (!is_ram_rom_romd(section)) {
d0ecd2aa
FB
3883 /* do nothing */
3884 } else {
3885 unsigned long addr1;
f3705d53
AK
3886 addr1 = memory_region_get_ram_addr(section->mr)
3887 + section_addr(section, addr);
d0ecd2aa 3888 /* ROM/RAM case */
5579c7f3 3889 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3890 memcpy(ptr, buf, l);
050a0ddf 3891 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3892 }
3893 len -= l;
3894 buf += l;
3895 addr += l;
3896 }
3897}
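/* cpu_physical_memory_write_rom() is what firmware loaders use: unlike
 * cpu_physical_memory_rw() it also copies into read-only (ROM) sections.
 * A sketch with a hypothetical blob and mapping address: */
#if 0   /* example only */
static void example_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_size);
}
#endif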
3898
6d16c2f8
AL
3899typedef struct {
3900 void *buffer;
c227f099
AL
3901 target_phys_addr_t addr;
3902 target_phys_addr_t len;
6d16c2f8
AL
3903} BounceBuffer;
3904
3905static BounceBuffer bounce;
3906
ba223c29
AL
3907typedef struct MapClient {
3908 void *opaque;
3909 void (*callback)(void *opaque);
72cf2d4f 3910 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3911} MapClient;
3912
72cf2d4f
BS
3913static QLIST_HEAD(map_client_list, MapClient) map_client_list
3914 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3915
3916void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3917{
7267c094 3918 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3919
3920 client->opaque = opaque;
3921 client->callback = callback;
72cf2d4f 3922 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3923 return client;
3924}
3925
3926void cpu_unregister_map_client(void *_client)
3927{
3928 MapClient *client = (MapClient *)_client;
3929
72cf2d4f 3930 QLIST_REMOVE(client, link);
7267c094 3931 g_free(client);
ba223c29
AL
3932}
3933
3934static void cpu_notify_map_clients(void)
3935{
3936 MapClient *client;
3937
72cf2d4f
BS
3938 while (!QLIST_EMPTY(&map_client_list)) {
3939 client = QLIST_FIRST(&map_client_list);
ba223c29 3940 client->callback(client->opaque);
34d5e948 3941 cpu_unregister_map_client(client);
ba223c29
AL
3942 }
3943}
3944
6d16c2f8
AL
3945/* Map a physical memory region into a host virtual address.
3946 * May map a subset of the requested range, given by and returned in *plen.
3947 * May return NULL if resources needed to perform the mapping are exhausted.
3948 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3949 * Use cpu_register_map_client() to know when retrying the map operation is
3950 * likely to succeed.
6d16c2f8 3951 */
c227f099
AL
3952void *cpu_physical_memory_map(target_phys_addr_t addr,
3953 target_phys_addr_t *plen,
6d16c2f8
AL
3954 int is_write)
3955{
c227f099 3956 target_phys_addr_t len = *plen;
38bee5dc 3957 target_phys_addr_t todo = 0;
6d16c2f8 3958 int l;
c227f099 3959 target_phys_addr_t page;
f3705d53 3960 MemoryRegionSection *section;
f15fbc4b 3961 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3962 ram_addr_t rlen;
3963 void *ret;
6d16c2f8
AL
3964
3965 while (len > 0) {
3966 page = addr & TARGET_PAGE_MASK;
3967 l = (page + TARGET_PAGE_SIZE) - addr;
3968 if (l > len)
3969 l = len;
06ef3525 3970 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3971
f3705d53 3972 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3973 if (todo || bounce.buffer) {
6d16c2f8
AL
3974 break;
3975 }
3976 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3977 bounce.addr = addr;
3978 bounce.len = l;
3979 if (!is_write) {
54f7b4a3 3980 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3981 }
38bee5dc
SS
3982
3983 *plen = l;
3984 return bounce.buffer;
6d16c2f8 3985 }
8ab934f9 3986 if (!todo) {
f3705d53
AK
3987 raddr = memory_region_get_ram_addr(section->mr)
3988 + section_addr(section, addr);
8ab934f9 3989 }
6d16c2f8
AL
3990
3991 len -= l;
3992 addr += l;
38bee5dc 3993 todo += l;
6d16c2f8 3994 }
8ab934f9
SS
3995 rlen = todo;
3996 ret = qemu_ram_ptr_length(raddr, &rlen);
3997 *plen = rlen;
3998 return ret;
6d16c2f8
AL
3999}
4000
4001/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4002 * Will also mark the memory as dirty if is_write == 1. access_len gives
4003 * the amount of memory that was actually read or written by the caller.
4004 */
c227f099
AL
4005void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4006 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4007{
4008 if (buffer != bounce.buffer) {
4009 if (is_write) {
e890261f 4010 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4011 while (access_len) {
4012 unsigned l;
4013 l = TARGET_PAGE_SIZE;
4014 if (l > access_len)
4015 l = access_len;
4016 if (!cpu_physical_memory_is_dirty(addr1)) {
4017 /* invalidate code */
4018 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4019 /* set dirty bit */
f7c11b53
YT
4020 cpu_physical_memory_set_dirty_flags(
4021 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4022 }
4023 addr1 += l;
4024 access_len -= l;
4025 }
4026 }
868bb33f 4027 if (xen_enabled()) {
e41d7c69 4028 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4029 }
6d16c2f8
AL
4030 return;
4031 }
4032 if (is_write) {
4033 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4034 }
f8a83245 4035 qemu_vfree(bounce.buffer);
6d16c2f8 4036 bounce.buffer = NULL;
ba223c29 4037 cpu_notify_map_clients();
6d16c2f8 4038}
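/* A sketch of the calling convention for the map/unmap pair above: map the
 * guest range for writing, fill the returned host pointer, then unmap with
 * the byte count actually produced so dirty tracking (and, for the bounce
 * buffer, the write-back) happens.  When the single bounce buffer is busy,
 * cpu_physical_memory_map() returns NULL and the caller may queue a retry
 * with cpu_register_map_client().  Callback and addresses are hypothetical. */
#if 0   /* example only */
static void example_dma_retry(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer is free;
       a real caller would restart its transfer here */
}

static void example_dma_fill(target_phys_addr_t gpa, int len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

    if (!host) {
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    memset(host, 0, plen);                     /* plen may be less than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif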
d0ecd2aa 4039
8df1cd07 4040/* warning: addr must be aligned */
1e78bcc1
AG
4041static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4042 enum device_endian endian)
8df1cd07 4043{
8df1cd07
FB
4044 uint8_t *ptr;
4045 uint32_t val;
f3705d53 4046 MemoryRegionSection *section;
8df1cd07 4047
06ef3525 4048 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4049
f3705d53 4050 if (!is_ram_rom_romd(section)) {
8df1cd07 4051 /* I/O case */
f3705d53 4052 addr = section_addr(section, addr);
37ec01d4 4053 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
4054#if defined(TARGET_WORDS_BIGENDIAN)
4055 if (endian == DEVICE_LITTLE_ENDIAN) {
4056 val = bswap32(val);
4057 }
4058#else
4059 if (endian == DEVICE_BIG_ENDIAN) {
4060 val = bswap32(val);
4061 }
4062#endif
8df1cd07
FB
4063 } else {
4064 /* RAM case */
f3705d53 4065 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4066 & TARGET_PAGE_MASK)
f3705d53 4067 + section_addr(section, addr));
1e78bcc1
AG
4068 switch (endian) {
4069 case DEVICE_LITTLE_ENDIAN:
4070 val = ldl_le_p(ptr);
4071 break;
4072 case DEVICE_BIG_ENDIAN:
4073 val = ldl_be_p(ptr);
4074 break;
4075 default:
4076 val = ldl_p(ptr);
4077 break;
4078 }
8df1cd07
FB
4079 }
4080 return val;
4081}
4082
1e78bcc1
AG
4083uint32_t ldl_phys(target_phys_addr_t addr)
4084{
4085 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4086}
4087
4088uint32_t ldl_le_phys(target_phys_addr_t addr)
4089{
4090 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4091}
4092
4093uint32_t ldl_be_phys(target_phys_addr_t addr)
4094{
4095 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4096}
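/* The _le/_be accessors above let device emulation read guest memory in the
 * device's byte order regardless of TARGET_WORDS_BIGENDIAN, whereas the plain
 * ldl_phys() follows the target's native order.  Sketch for a hypothetical
 * little-endian in-memory descriptor: */
#if 0   /* example only */
static uint32_t example_read_le_descriptor(target_phys_addr_t desc_gpa)
{
    return ldl_le_phys(desc_gpa);   /* same result on LE and BE targets */
}
#endif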
4097
84b7b8e7 4098/* warning: addr must be aligned */
1e78bcc1
AG
4099static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4100 enum device_endian endian)
84b7b8e7 4101{
84b7b8e7
FB
4102 uint8_t *ptr;
4103 uint64_t val;
f3705d53 4104 MemoryRegionSection *section;
84b7b8e7 4105
06ef3525 4106 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4107
f3705d53 4108 if (!is_ram_rom_romd(section)) {
84b7b8e7 4109 /* I/O case */
f3705d53 4110 addr = section_addr(section, addr);
1e78bcc1
AG
4111
4112 /* XXX This is broken when device endian != cpu endian.
4113 Fix and add "endian" variable check */
84b7b8e7 4114#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4115 val = io_mem_read(section->mr, addr, 4) << 32;
4116 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 4117#else
37ec01d4
AK
4118 val = io_mem_read(section->mr, addr, 4);
4119 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
4120#endif
4121 } else {
4122 /* RAM case */
f3705d53 4123 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4124 & TARGET_PAGE_MASK)
f3705d53 4125 + section_addr(section, addr));
1e78bcc1
AG
4126 switch (endian) {
4127 case DEVICE_LITTLE_ENDIAN:
4128 val = ldq_le_p(ptr);
4129 break;
4130 case DEVICE_BIG_ENDIAN:
4131 val = ldq_be_p(ptr);
4132 break;
4133 default:
4134 val = ldq_p(ptr);
4135 break;
4136 }
84b7b8e7
FB
4137 }
4138 return val;
4139}
4140
1e78bcc1
AG
4141uint64_t ldq_phys(target_phys_addr_t addr)
4142{
4143 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4144}
4145
4146uint64_t ldq_le_phys(target_phys_addr_t addr)
4147{
4148 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4149}
4150
4151uint64_t ldq_be_phys(target_phys_addr_t addr)
4152{
4153 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4154}
4155
aab33094 4156/* XXX: optimize */
c227f099 4157uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4158{
4159 uint8_t val;
4160 cpu_physical_memory_read(addr, &val, 1);
4161 return val;
4162}
4163
733f0b02 4164/* warning: addr must be aligned */
1e78bcc1
AG
4165static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4166 enum device_endian endian)
aab33094 4167{
733f0b02
MT
4168 uint8_t *ptr;
4169 uint64_t val;
f3705d53 4170 MemoryRegionSection *section;
733f0b02 4171
06ef3525 4172 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4173
f3705d53 4174 if (!is_ram_rom_romd(section)) {
733f0b02 4175 /* I/O case */
f3705d53 4176 addr = section_addr(section, addr);
37ec01d4 4177 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
4178#if defined(TARGET_WORDS_BIGENDIAN)
4179 if (endian == DEVICE_LITTLE_ENDIAN) {
4180 val = bswap16(val);
4181 }
4182#else
4183 if (endian == DEVICE_BIG_ENDIAN) {
4184 val = bswap16(val);
4185 }
4186#endif
733f0b02
MT
4187 } else {
4188 /* RAM case */
f3705d53 4189 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4190 & TARGET_PAGE_MASK)
f3705d53 4191 + section_addr(section, addr));
1e78bcc1
AG
4192 switch (endian) {
4193 case DEVICE_LITTLE_ENDIAN:
4194 val = lduw_le_p(ptr);
4195 break;
4196 case DEVICE_BIG_ENDIAN:
4197 val = lduw_be_p(ptr);
4198 break;
4199 default:
4200 val = lduw_p(ptr);
4201 break;
4202 }
733f0b02
MT
4203 }
4204 return val;
aab33094
FB
4205}
4206
1e78bcc1
AG
4207uint32_t lduw_phys(target_phys_addr_t addr)
4208{
4209 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4210}
4211
4212uint32_t lduw_le_phys(target_phys_addr_t addr)
4213{
4214 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4215}
4216
4217uint32_t lduw_be_phys(target_phys_addr_t addr)
4218{
4219 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4220}
4221
8df1cd07
FB
4222/* warning: addr must be aligned. The ram page is not masked as dirty
4223 and the code inside is not invalidated. It is useful if the dirty
4224 bits are used to track modified PTEs */
c227f099 4225void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 4226{
8df1cd07 4227 uint8_t *ptr;
f3705d53 4228 MemoryRegionSection *section;
8df1cd07 4229
06ef3525 4230 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4231
f3705d53 4232 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4233 addr = section_addr(section, addr);
f3705d53 4234 if (memory_region_is_ram(section->mr)) {
37ec01d4 4235 section = &phys_sections[phys_section_rom];
06ef3525 4236 }
37ec01d4 4237 io_mem_write(section->mr, addr, val, 4);
8df1cd07 4238 } else {
f3705d53 4239 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 4240 & TARGET_PAGE_MASK)
f3705d53 4241 + section_addr(section, addr);
5579c7f3 4242 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4243 stl_p(ptr, val);
74576198
AL
4244
4245 if (unlikely(in_migration)) {
4246 if (!cpu_physical_memory_is_dirty(addr1)) {
4247 /* invalidate code */
4248 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4249 /* set dirty bit */
f7c11b53
YT
4250 cpu_physical_memory_set_dirty_flags(
4251 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4252 }
4253 }
8df1cd07
FB
4254 }
4255}
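/* stl_phys_notdirty() suits target MMU helpers that update page-table entries
 * behind the guest's back (as the comment above notes): the store must not
 * mark the page dirty or invalidate translated code sharing that page.  The
 * PTE bit below is a made-up value for illustration: */
#if 0   /* example only */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif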
4256
c227f099 4257void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 4258{
bc98a7ef 4259 uint8_t *ptr;
f3705d53 4260 MemoryRegionSection *section;
bc98a7ef 4261
06ef3525 4262 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4263
f3705d53 4264 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4265 addr = section_addr(section, addr);
f3705d53 4266 if (memory_region_is_ram(section->mr)) {
37ec01d4 4267 section = &phys_sections[phys_section_rom];
06ef3525 4268 }
bc98a7ef 4269#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4270 io_mem_write(section->mr, addr, val >> 32, 4);
4271 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 4272#else
37ec01d4
AK
4273 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4274 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
4275#endif
4276 } else {
f3705d53 4277 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4278 & TARGET_PAGE_MASK)
f3705d53 4279 + section_addr(section, addr));
bc98a7ef
JM
4280 stq_p(ptr, val);
4281 }
4282}
4283
8df1cd07 4284/* warning: addr must be aligned */
1e78bcc1
AG
4285static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4286 enum device_endian endian)
8df1cd07 4287{
8df1cd07 4288 uint8_t *ptr;
f3705d53 4289 MemoryRegionSection *section;
8df1cd07 4290
06ef3525 4291 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4292
f3705d53 4293 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4294 addr = section_addr(section, addr);
f3705d53 4295 if (memory_region_is_ram(section->mr)) {
37ec01d4 4296 section = &phys_sections[phys_section_rom];
06ef3525 4297 }
1e78bcc1
AG
4298#if defined(TARGET_WORDS_BIGENDIAN)
4299 if (endian == DEVICE_LITTLE_ENDIAN) {
4300 val = bswap32(val);
4301 }
4302#else
4303 if (endian == DEVICE_BIG_ENDIAN) {
4304 val = bswap32(val);
4305 }
4306#endif
37ec01d4 4307 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4308 } else {
4309 unsigned long addr1;
f3705d53
AK
4310 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4311 + section_addr(section, addr);
8df1cd07 4312 /* RAM case */
5579c7f3 4313 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4314 switch (endian) {
4315 case DEVICE_LITTLE_ENDIAN:
4316 stl_le_p(ptr, val);
4317 break;
4318 case DEVICE_BIG_ENDIAN:
4319 stl_be_p(ptr, val);
4320 break;
4321 default:
4322 stl_p(ptr, val);
4323 break;
4324 }
3a7d929e
FB
4325 if (!cpu_physical_memory_is_dirty(addr1)) {
4326 /* invalidate code */
4327 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4328 /* set dirty bit */
f7c11b53
YT
4329 cpu_physical_memory_set_dirty_flags(addr1,
4330 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4331 }
8df1cd07
FB
4332 }
4333}
4334
1e78bcc1
AG
4335void stl_phys(target_phys_addr_t addr, uint32_t val)
4336{
4337 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4338}
4339
4340void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4341{
4342 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4343}
4344
4345void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4346{
4347 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4348}
4349
aab33094 4350/* XXX: optimize */
c227f099 4351void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4352{
4353 uint8_t v = val;
4354 cpu_physical_memory_write(addr, &v, 1);
4355}
4356
733f0b02 4357/* warning: addr must be aligned */
1e78bcc1
AG
4358static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4359 enum device_endian endian)
aab33094 4360{
733f0b02 4361 uint8_t *ptr;
f3705d53 4362 MemoryRegionSection *section;
733f0b02 4363
06ef3525 4364 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4365
f3705d53 4366 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4367 addr = section_addr(section, addr);
f3705d53 4368 if (memory_region_is_ram(section->mr)) {
37ec01d4 4369 section = &phys_sections[phys_section_rom];
06ef3525 4370 }
1e78bcc1
AG
4371#if defined(TARGET_WORDS_BIGENDIAN)
4372 if (endian == DEVICE_LITTLE_ENDIAN) {
4373 val = bswap16(val);
4374 }
4375#else
4376 if (endian == DEVICE_BIG_ENDIAN) {
4377 val = bswap16(val);
4378 }
4379#endif
37ec01d4 4380 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4381 } else {
4382 unsigned long addr1;
f3705d53
AK
4383 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4384 + section_addr(section, addr);
733f0b02
MT
4385 /* RAM case */
4386 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4387 switch (endian) {
4388 case DEVICE_LITTLE_ENDIAN:
4389 stw_le_p(ptr, val);
4390 break;
4391 case DEVICE_BIG_ENDIAN:
4392 stw_be_p(ptr, val);
4393 break;
4394 default:
4395 stw_p(ptr, val);
4396 break;
4397 }
733f0b02
MT
4398 if (!cpu_physical_memory_is_dirty(addr1)) {
4399 /* invalidate code */
4400 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4401 /* set dirty bit */
4402 cpu_physical_memory_set_dirty_flags(addr1,
4403 (0xff & ~CODE_DIRTY_FLAG));
4404 }
4405 }
aab33094
FB
4406}
4407
1e78bcc1
AG
4408void stw_phys(target_phys_addr_t addr, uint32_t val)
4409{
4410 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4411}
4412
4413void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4414{
4415 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4416}
4417
4418void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4419{
4420 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4421}
4422
aab33094 4423/* XXX: optimize */
c227f099 4424void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4425{
4426 val = tswap64(val);
71d2b725 4427 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4428}
4429
1e78bcc1
AG
4430void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4431{
4432 val = cpu_to_le64(val);
4433 cpu_physical_memory_write(addr, &val, 8);
4434}
4435
4436void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4437{
4438 val = cpu_to_be64(val);
4439 cpu_physical_memory_write(addr, &val, 8);
4440}
4441
5e2972fd 4442/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4443int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4444 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4445{
4446 int l;
c227f099 4447 target_phys_addr_t phys_addr;
9b3c35e0 4448 target_ulong page;
13eb76e0
FB
4449
4450 while (len > 0) {
4451 page = addr & TARGET_PAGE_MASK;
4452 phys_addr = cpu_get_phys_page_debug(env, page);
4453 /* if no physical page mapped, return an error */
4454 if (phys_addr == -1)
4455 return -1;
4456 l = (page + TARGET_PAGE_SIZE) - addr;
4457 if (l > len)
4458 l = len;
5e2972fd 4459 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4460 if (is_write)
4461 cpu_physical_memory_write_rom(phys_addr, buf, l);
4462 else
5e2972fd 4463 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4464 len -= l;
4465 buf += l;
4466 addr += l;
4467 }
4468 return 0;
4469}
a68fe89c 4470#endif
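/* cpu_memory_rw_debug() is the accessor a debugger stub would use.  On the
 * system-emulation side it translates page by page with
 * cpu_get_phys_page_debug() and, for writes, goes through
 * cpu_physical_memory_write_rom() so even ROM-backed pages can be patched
 * (e.g. to plant a breakpoint).  A sketch: */
#if 0   /* example only */
static int example_debugger_read(CPUArchState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}
#endif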
13eb76e0 4471
2e70f6ef
PB
4472/* in deterministic execution mode, instructions doing device I/Os
4473 must be at the end of the TB */
9349b4f9 4474void cpu_io_recompile(CPUArchState *env, void *retaddr)
2e70f6ef
PB
4475{
4476 TranslationBlock *tb;
4477 uint32_t n, cflags;
4478 target_ulong pc, cs_base;
4479 uint64_t flags;
4480
4481 tb = tb_find_pc((unsigned long)retaddr);
4482 if (!tb) {
4483 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4484 retaddr);
4485 }
4486 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4487 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4488 /* Calculate how many instructions had been executed before the fault
bf20dc07 4489 occurred. */
2e70f6ef
PB
4490 n = n - env->icount_decr.u16.low;
4491 /* Generate a new TB ending on the I/O insn. */
4492 n++;
4493 /* On MIPS and SH, delay slot instructions can only be restarted if
4494 they were already the first instruction in the TB. If this is not
bf20dc07 4495 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4496 branch. */
4497#if defined(TARGET_MIPS)
4498 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4499 env->active_tc.PC -= 4;
4500 env->icount_decr.u16.low++;
4501 env->hflags &= ~MIPS_HFLAG_BMASK;
4502 }
4503#elif defined(TARGET_SH4)
4504 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4505 && n > 1) {
4506 env->pc -= 2;
4507 env->icount_decr.u16.low++;
4508 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4509 }
4510#endif
4511 /* This should never happen. */
4512 if (n > CF_COUNT_MASK)
4513 cpu_abort(env, "TB too big during recompile");
4514
4515 cflags = n | CF_LAST_IO;
4516 pc = tb->pc;
4517 cs_base = tb->cs_base;
4518 flags = tb->flags;
4519 tb_phys_invalidate(tb, -1);
4520 /* FIXME: In theory this could raise an exception. In practice
4521 we have already translated the block once so it's probably ok. */
4522 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4523 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4524 the first in the TB) then we end up generating a whole new TB and
4525 repeating the fault, which is horribly inefficient.
4526 Better would be to execute just this insn uncached, or generate a
4527 second new TB. */
4528 cpu_resume_from_signal(env, NULL);
4529}
4530
b3755a91
PB
4531#if !defined(CONFIG_USER_ONLY)
4532
055403b2 4533void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4534{
4535 int i, target_code_size, max_target_code_size;
4536 int direct_jmp_count, direct_jmp2_count, cross_page;
4537 TranslationBlock *tb;
3b46e624 4538
e3db7226
FB
4539 target_code_size = 0;
4540 max_target_code_size = 0;
4541 cross_page = 0;
4542 direct_jmp_count = 0;
4543 direct_jmp2_count = 0;
4544 for(i = 0; i < nb_tbs; i++) {
4545 tb = &tbs[i];
4546 target_code_size += tb->size;
4547 if (tb->size > max_target_code_size)
4548 max_target_code_size = tb->size;
4549 if (tb->page_addr[1] != -1)
4550 cross_page++;
4551 if (tb->tb_next_offset[0] != 0xffff) {
4552 direct_jmp_count++;
4553 if (tb->tb_next_offset[1] != 0xffff) {
4554 direct_jmp2_count++;
4555 }
4556 }
4557 }
4558 /* XXX: avoid using doubles ? */
57fec1fe 4559 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4560 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4561 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4562 cpu_fprintf(f, "TB count %d/%d\n",
4563 nb_tbs, code_gen_max_blocks);
5fafdf24 4564 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4565 nb_tbs ? target_code_size / nb_tbs : 0,
4566 max_target_code_size);
055403b2 4567 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4568 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4569 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4570 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4571 cross_page,
e3db7226
FB
4572 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4573 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4574 direct_jmp_count,
e3db7226
FB
4575 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4576 direct_jmp2_count,
4577 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4578 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4579 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4580 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4581 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4582 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4583}
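/* dump_exec_info() accepts any fprintf-style sink; in practice it is reached
 * from the monitor ("info jit"), but plain stdio works for ad-hoc debugging.
 * This assumes fprintf_function matches fprintf's (FILE *, fmt, ...) shape: */
#if 0   /* example only */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif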
4584
d39e8222
AK
4585/* NOTE: this function can trigger an exception */
4586/* NOTE2: the returned address is not exactly the physical address: it
4587 is the offset relative to phys_ram_base */
9349b4f9 4588tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
d39e8222
AK
4589{
4590 int mmu_idx, page_index, pd;
4591 void *p;
37ec01d4 4592 MemoryRegion *mr;
d39e8222
AK
4593
4594 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4595 mmu_idx = cpu_mmu_index(env1);
4596 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4597 (addr & TARGET_PAGE_MASK))) {
4598 ldub_code(addr);
4599 }
ce5d64c2 4600 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
37ec01d4
AK
4601 mr = iotlb_to_region(pd);
4602 if (mr != &io_mem_ram && mr != &io_mem_rom
4603 && mr != &io_mem_notdirty && !mr->rom_device) {
d39e8222
AK
4604#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4605 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4606#else
4607 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4608#endif
4609 }
4610 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4611 return qemu_ram_addr_from_host_nofail(p);
4612}
4613
82afa586
BH
4614/*
4615 * A helper function for the _utterly broken_ virtio device model to find out if
4616 * it's running on a big endian machine. Don't do this at home kids!
4617 */
4618bool virtio_is_big_endian(void);
4619bool virtio_is_big_endian(void)
4620{
4621#if defined(TARGET_WORDS_BIGENDIAN)
4622 return true;
4623#else
4624 return false;
4625#endif
4626}
4627
61382a50 4628#define MMUSUFFIX _cmmu
3917149d 4629#undef GETPC
61382a50
FB
4630#define GETPC() NULL
4631#define env cpu_single_env
b769d8fe 4632#define SOFTMMU_CODE_ACCESS
61382a50
FB
4633
4634#define SHIFT 0
4635#include "softmmu_template.h"
4636
4637#define SHIFT 1
4638#include "softmmu_template.h"
4639
4640#define SHIFT 2
4641#include "softmmu_template.h"
4642
4643#define SHIFT 3
4644#include "softmmu_template.h"
4645
4646#undef env
4647
4648#endif