]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
softmmu: Use uintptr_t for physaddr and rename it
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15
AK
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
67
68/* make various TB consistency checks */
5fafdf24
TS
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4
PB
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
6840981d 96#elif defined(_WIN32) && !defined(_WIN64)
f8e2af11
SW
97#define code_gen_section \
98 __attribute__((aligned (16)))
d03d860b
BS
99#else
100#define code_gen_section \
101 __attribute__((aligned (32)))
102#endif
103
104uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
105static uint8_t *code_gen_buffer;
106static unsigned long code_gen_buffer_size;
26a5f13b 107/* threshold to flush the translated code buffer */
bdaf78e0 108static unsigned long code_gen_buffer_max_size;
24ab68ac 109static uint8_t *code_gen_ptr;
fd6ce8f6 110
e2eef170 111#if !defined(CONFIG_USER_ONLY)
9fa3e853 112int phys_ram_fd;
74576198 113static int in_migration;
94a6b54f 114
85d59fef 115RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
116
117static MemoryRegion *system_memory;
309cb471 118static MemoryRegion *system_io;
62152b8a 119
0e0df1e2 120MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 121static MemoryRegion io_mem_subpage_ram;
0e0df1e2 122
e2eef170 123#endif
9fa3e853 124
9349b4f9 125CPUArchState *first_cpu;
6a00d601
FB
126/* current CPU in the current thread. It is only valid inside
127 cpu_exec() */
9349b4f9 128DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 129/* 0 = Do not count executed instructions.
bf20dc07 130 1 = Precise instruction counting.
2e70f6ef
PB
131 2 = Adaptive rate instruction counting. */
132int use_icount = 0;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
41c1b1c9 146/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
147 while in user mode we want it to be based on virtual addresses. */
148#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
149#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
150# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
151#else
5cd2c5b6 152# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 153#endif
bedb69ea 154#else
5cd2c5b6 155# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 156#endif
54936004 157
5cd2c5b6
RH
158/* Size of the L2 (and L3, etc) page tables. */
159#define L2_BITS 10
54936004
FB
160#define L2_SIZE (1 << L2_BITS)
161
3eef53df
AK
162#define P_L2_LEVELS \
163 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
164
5cd2c5b6 165/* The bits remaining after N lower levels of page tables. */
5cd2c5b6
RH
166#define V_L1_BITS_REM \
167 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
168
5cd2c5b6
RH
169#if V_L1_BITS_REM < 4
170#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
171#else
172#define V_L1_BITS V_L1_BITS_REM
173#endif
174
5cd2c5b6
RH
175#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
176
5cd2c5b6
RH
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
c6d50674
SW
179uintptr_t qemu_real_host_page_size;
180uintptr_t qemu_host_page_size;
181uintptr_t qemu_host_page_mask;
54936004 182
5cd2c5b6
RH
183/* This is a multi-level map on the virtual address space.
184 The bottom level has pointers to PageDesc. */
185static void *l1_map[V_L1_SIZE];
54936004 186
e2eef170 187#if !defined(CONFIG_USER_ONLY)
4346ae3e
AK
188typedef struct PhysPageEntry PhysPageEntry;
189
5312bd8b
AK
190static MemoryRegionSection *phys_sections;
191static unsigned phys_sections_nb, phys_sections_nb_alloc;
192static uint16_t phys_section_unassigned;
aa102231
AK
193static uint16_t phys_section_notdirty;
194static uint16_t phys_section_rom;
195static uint16_t phys_section_watch;
5312bd8b 196
4346ae3e 197struct PhysPageEntry {
07f07b31
AK
198 uint16_t is_leaf : 1;
199 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
200 uint16_t ptr : 15;
4346ae3e
AK
201};
202
d6f2ea22
AK
203/* Simple allocator for PhysPageEntry nodes */
204static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
205static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
206
07f07b31 207#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 208
5cd2c5b6 209/* This is a multi-level map on the physical address space.
06ef3525 210 The bottom level has pointers to MemoryRegionSections. */
07f07b31 211static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
6d9a1304 212
e2eef170 213static void io_mem_init(void);
62152b8a 214static void memory_map_init(void);
e2eef170 215
1ec9b909 216static MemoryRegion io_mem_watch;
6658ffb8 217#endif
33417e70 218
34865134 219/* log support */
1e8b27ca
JR
220#ifdef WIN32
221static const char *logfilename = "qemu.log";
222#else
d9b630fd 223static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 224#endif
34865134
FB
225FILE *logfile;
226int loglevel;
e735b91c 227static int log_append = 0;
34865134 228
e3db7226 229/* statistics */
b3755a91 230#if !defined(CONFIG_USER_ONLY)
e3db7226 231static int tlb_flush_count;
b3755a91 232#endif
e3db7226
FB
233static int tb_flush_count;
234static int tb_phys_invalidate_count;
235
7cb69cae
FB
236#ifdef _WIN32
237static void map_exec(void *addr, long size)
238{
239 DWORD old_protect;
240 VirtualProtect(addr, size,
241 PAGE_EXECUTE_READWRITE, &old_protect);
242
243}
244#else
245static void map_exec(void *addr, long size)
246{
4369415f 247 unsigned long start, end, page_size;
7cb69cae 248
4369415f 249 page_size = getpagesize();
7cb69cae 250 start = (unsigned long)addr;
4369415f 251 start &= ~(page_size - 1);
7cb69cae
FB
252
253 end = (unsigned long)addr + size;
4369415f
FB
254 end += page_size - 1;
255 end &= ~(page_size - 1);
7cb69cae
FB
256
257 mprotect((void *)start, end - start,
258 PROT_READ | PROT_WRITE | PROT_EXEC);
259}
260#endif
261
b346ff46 262static void page_init(void)
54936004 263{
83fb7adf 264 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 265 TARGET_PAGE_SIZE */
c2b48b69
AL
266#ifdef _WIN32
267 {
268 SYSTEM_INFO system_info;
269
270 GetSystemInfo(&system_info);
271 qemu_real_host_page_size = system_info.dwPageSize;
272 }
273#else
274 qemu_real_host_page_size = getpagesize();
275#endif
83fb7adf
FB
276 if (qemu_host_page_size == 0)
277 qemu_host_page_size = qemu_real_host_page_size;
278 if (qemu_host_page_size < TARGET_PAGE_SIZE)
279 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 280 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 281
2e9a5713 282#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 283 {
f01576f1
JL
284#ifdef HAVE_KINFO_GETVMMAP
285 struct kinfo_vmentry *freep;
286 int i, cnt;
287
288 freep = kinfo_getvmmap(getpid(), &cnt);
289 if (freep) {
290 mmap_lock();
291 for (i = 0; i < cnt; i++) {
292 unsigned long startaddr, endaddr;
293
294 startaddr = freep[i].kve_start;
295 endaddr = freep[i].kve_end;
296 if (h2g_valid(startaddr)) {
297 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
298
299 if (h2g_valid(endaddr)) {
300 endaddr = h2g(endaddr);
fd436907 301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
302 } else {
303#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
304 endaddr = ~0ul;
fd436907 305 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
306#endif
307 }
308 }
309 }
310 free(freep);
311 mmap_unlock();
312 }
313#else
50a9569b 314 FILE *f;
50a9569b 315
0776590d 316 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 317
fd436907 318 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 319 if (f) {
5cd2c5b6
RH
320 mmap_lock();
321
50a9569b 322 do {
5cd2c5b6
RH
323 unsigned long startaddr, endaddr;
324 int n;
325
326 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
327
328 if (n == 2 && h2g_valid(startaddr)) {
329 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
330
331 if (h2g_valid(endaddr)) {
332 endaddr = h2g(endaddr);
333 } else {
334 endaddr = ~0ul;
335 }
336 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
337 }
338 } while (!feof(f));
5cd2c5b6 339
50a9569b 340 fclose(f);
5cd2c5b6 341 mmap_unlock();
50a9569b 342 }
f01576f1 343#endif
50a9569b
AZ
344 }
345#endif
54936004
FB
346}
347
41c1b1c9 348static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 349{
41c1b1c9
PB
350 PageDesc *pd;
351 void **lp;
352 int i;
353
5cd2c5b6 354#if defined(CONFIG_USER_ONLY)
7267c094 355 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
356# define ALLOC(P, SIZE) \
357 do { \
358 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
359 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
360 } while (0)
361#else
362# define ALLOC(P, SIZE) \
7267c094 363 do { P = g_malloc0(SIZE); } while (0)
17e2377a 364#endif
434929bf 365
5cd2c5b6
RH
366 /* Level 1. Always allocated. */
367 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
368
369 /* Level 2..N-1. */
370 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
371 void **p = *lp;
372
373 if (p == NULL) {
374 if (!alloc) {
375 return NULL;
376 }
377 ALLOC(p, sizeof(void *) * L2_SIZE);
378 *lp = p;
17e2377a 379 }
5cd2c5b6
RH
380
381 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
382 }
383
384 pd = *lp;
385 if (pd == NULL) {
386 if (!alloc) {
387 return NULL;
388 }
389 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
390 *lp = pd;
54936004 391 }
5cd2c5b6
RH
392
393#undef ALLOC
5cd2c5b6
RH
394
395 return pd + (index & (L2_SIZE - 1));
54936004
FB
396}
397
41c1b1c9 398static inline PageDesc *page_find(tb_page_addr_t index)
54936004 399{
5cd2c5b6 400 return page_find_alloc(index, 0);
fd6ce8f6
FB
401}
402
6d9a1304 403#if !defined(CONFIG_USER_ONLY)
d6f2ea22 404
f7bf5461 405static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 406{
f7bf5461 407 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
408 typedef PhysPageEntry Node[L2_SIZE];
409 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
410 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
411 phys_map_nodes_nb + nodes);
d6f2ea22
AK
412 phys_map_nodes = g_renew(Node, phys_map_nodes,
413 phys_map_nodes_nb_alloc);
414 }
f7bf5461
AK
415}
416
417static uint16_t phys_map_node_alloc(void)
418{
419 unsigned i;
420 uint16_t ret;
421
422 ret = phys_map_nodes_nb++;
423 assert(ret != PHYS_MAP_NODE_NIL);
424 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 425 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 426 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 427 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 428 }
f7bf5461 429 return ret;
d6f2ea22
AK
430}
431
432static void phys_map_nodes_reset(void)
433{
434 phys_map_nodes_nb = 0;
435}
436
92e873b9 437
2999097b
AK
438static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
439 target_phys_addr_t *nb, uint16_t leaf,
440 int level)
f7bf5461
AK
441{
442 PhysPageEntry *p;
443 int i;
07f07b31 444 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
108c49b8 445
07f07b31 446 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
447 lp->ptr = phys_map_node_alloc();
448 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
449 if (level == 0) {
450 for (i = 0; i < L2_SIZE; i++) {
07f07b31 451 p[i].is_leaf = 1;
c19e8800 452 p[i].ptr = phys_section_unassigned;
4346ae3e 453 }
67c4d23c 454 }
f7bf5461 455 } else {
c19e8800 456 p = phys_map_nodes[lp->ptr];
92e873b9 457 }
2999097b 458 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 459
2999097b 460 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
461 if ((*index & (step - 1)) == 0 && *nb >= step) {
462 lp->is_leaf = true;
c19e8800 463 lp->ptr = leaf;
07f07b31
AK
464 *index += step;
465 *nb -= step;
2999097b
AK
466 } else {
467 phys_page_set_level(lp, index, nb, leaf, level - 1);
468 }
469 ++lp;
f7bf5461
AK
470 }
471}
472
2999097b
AK
473static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
474 uint16_t leaf)
f7bf5461 475{
2999097b 476 /* Wildly overreserve - it doesn't matter much. */
07f07b31 477 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 478
2999097b 479 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
480}
481
f3705d53 482static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
92e873b9 483{
31ab2b4a
AK
484 PhysPageEntry lp = phys_map;
485 PhysPageEntry *p;
486 int i;
31ab2b4a 487 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 488
07f07b31 489 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 490 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
491 goto not_found;
492 }
c19e8800 493 p = phys_map_nodes[lp.ptr];
31ab2b4a 494 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 495 }
31ab2b4a 496
c19e8800 497 s_index = lp.ptr;
31ab2b4a 498not_found:
f3705d53
AK
499 return &phys_sections[s_index];
500}
501
502static target_phys_addr_t section_addr(MemoryRegionSection *section,
503 target_phys_addr_t addr)
504{
505 addr -= section->offset_within_address_space;
506 addr += section->offset_within_region;
507 return addr;
92e873b9
FB
508}
509
c227f099 510static void tlb_protect_code(ram_addr_t ram_addr);
9349b4f9 511static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 512 target_ulong vaddr);
c8a706fe
PB
513#define mmap_lock() do { } while(0)
514#define mmap_unlock() do { } while(0)
9fa3e853 515#endif
fd6ce8f6 516
4369415f
FB
517#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
518
519#if defined(CONFIG_USER_ONLY)
ccbb4d44 520/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
521 user mode. It will change when a dedicated libc will be used */
522#define USE_STATIC_CODE_GEN_BUFFER
523#endif
524
525#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
526static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
527 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
528#endif
529
8fcd3692 530static void code_gen_alloc(unsigned long tb_size)
26a5f13b 531{
4369415f
FB
532#ifdef USE_STATIC_CODE_GEN_BUFFER
533 code_gen_buffer = static_code_gen_buffer;
534 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
535 map_exec(code_gen_buffer, code_gen_buffer_size);
536#else
26a5f13b
FB
537 code_gen_buffer_size = tb_size;
538 if (code_gen_buffer_size == 0) {
4369415f 539#if defined(CONFIG_USER_ONLY)
4369415f
FB
540 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
541#else
ccbb4d44 542 /* XXX: needs adjustments */
94a6b54f 543 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 544#endif
26a5f13b
FB
545 }
546 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
547 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
548 /* The code gen buffer location may have constraints depending on
549 the host cpu and OS */
550#if defined(__linux__)
551 {
552 int flags;
141ac468
BS
553 void *start = NULL;
554
26a5f13b
FB
555 flags = MAP_PRIVATE | MAP_ANONYMOUS;
556#if defined(__x86_64__)
557 flags |= MAP_32BIT;
558 /* Cannot map more than that */
559 if (code_gen_buffer_size > (800 * 1024 * 1024))
560 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
561#elif defined(__sparc_v9__)
562 // Map the buffer below 2G, so we can use direct calls and branches
563 flags |= MAP_FIXED;
564 start = (void *) 0x60000000UL;
565 if (code_gen_buffer_size > (512 * 1024 * 1024))
566 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 567#elif defined(__arm__)
5c84bd90 568 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
569 if (code_gen_buffer_size > 16 * 1024 * 1024)
570 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
571#elif defined(__s390x__)
572 /* Map the buffer so that we can use direct calls and branches. */
573 /* We have a +- 4GB range on the branches; leave some slop. */
574 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
575 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
576 }
577 start = (void *)0x90000000UL;
26a5f13b 578#endif
141ac468
BS
579 code_gen_buffer = mmap(start, code_gen_buffer_size,
580 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
581 flags, -1, 0);
582 if (code_gen_buffer == MAP_FAILED) {
583 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
584 exit(1);
585 }
586 }
cbb608a5 587#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
588 || defined(__DragonFly__) || defined(__OpenBSD__) \
589 || defined(__NetBSD__)
06e67a82
AL
590 {
591 int flags;
592 void *addr = NULL;
593 flags = MAP_PRIVATE | MAP_ANONYMOUS;
594#if defined(__x86_64__)
595 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
596 * 0x40000000 is free */
597 flags |= MAP_FIXED;
598 addr = (void *)0x40000000;
599 /* Cannot map more than that */
600 if (code_gen_buffer_size > (800 * 1024 * 1024))
601 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
602#elif defined(__sparc_v9__)
603 // Map the buffer below 2G, so we can use direct calls and branches
604 flags |= MAP_FIXED;
605 addr = (void *) 0x60000000UL;
606 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
607 code_gen_buffer_size = (512 * 1024 * 1024);
608 }
06e67a82
AL
609#endif
610 code_gen_buffer = mmap(addr, code_gen_buffer_size,
611 PROT_WRITE | PROT_READ | PROT_EXEC,
612 flags, -1, 0);
613 if (code_gen_buffer == MAP_FAILED) {
614 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
615 exit(1);
616 }
617 }
26a5f13b 618#else
7267c094 619 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
620 map_exec(code_gen_buffer, code_gen_buffer_size);
621#endif
4369415f 622#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 623 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
624 code_gen_buffer_max_size = code_gen_buffer_size -
625 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 626 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 627 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
628}
629
630/* Must be called before using the QEMU cpus. 'tb_size' is the size
631 (in bytes) allocated to the translation buffer. Zero means default
632 size. */
d5ab9713 633void tcg_exec_init(unsigned long tb_size)
26a5f13b 634{
26a5f13b
FB
635 cpu_gen_init();
636 code_gen_alloc(tb_size);
637 code_gen_ptr = code_gen_buffer;
813da627 638 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
4369415f 639 page_init();
9002ec79
RH
640#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
641 /* There's no guest base to take into account, so go ahead and
642 initialize the prologue now. */
643 tcg_prologue_init(&tcg_ctx);
644#endif
26a5f13b
FB
645}
646
d5ab9713
JK
647bool tcg_enabled(void)
648{
649 return code_gen_buffer != NULL;
650}
651
652void cpu_exec_init_all(void)
653{
654#if !defined(CONFIG_USER_ONLY)
655 memory_map_init();
656 io_mem_init();
657#endif
658}
659
9656f324
PB
660#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
661
e59fb374 662static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7 663{
9349b4f9 664 CPUArchState *env = opaque;
9656f324 665
3098dba0
AJ
666 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
667 version_id is increased. */
668 env->interrupt_request &= ~0x01;
9656f324
PB
669 tlb_flush(env, 1);
670
671 return 0;
672}
e7f4eff7
JQ
673
674static const VMStateDescription vmstate_cpu_common = {
675 .name = "cpu_common",
676 .version_id = 1,
677 .minimum_version_id = 1,
678 .minimum_version_id_old = 1,
e7f4eff7
JQ
679 .post_load = cpu_common_post_load,
680 .fields = (VMStateField []) {
9349b4f9
AF
681 VMSTATE_UINT32(halted, CPUArchState),
682 VMSTATE_UINT32(interrupt_request, CPUArchState),
e7f4eff7
JQ
683 VMSTATE_END_OF_LIST()
684 }
685};
9656f324
PB
686#endif
687
9349b4f9 688CPUArchState *qemu_get_cpu(int cpu)
950f1472 689{
9349b4f9 690 CPUArchState *env = first_cpu;
950f1472
GC
691
692 while (env) {
693 if (env->cpu_index == cpu)
694 break;
695 env = env->next_cpu;
696 }
697
698 return env;
699}
700
9349b4f9 701void cpu_exec_init(CPUArchState *env)
fd6ce8f6 702{
9349b4f9 703 CPUArchState **penv;
6a00d601
FB
704 int cpu_index;
705
c2764719
PB
706#if defined(CONFIG_USER_ONLY)
707 cpu_list_lock();
708#endif
6a00d601
FB
709 env->next_cpu = NULL;
710 penv = &first_cpu;
711 cpu_index = 0;
712 while (*penv != NULL) {
1e9fa730 713 penv = &(*penv)->next_cpu;
6a00d601
FB
714 cpu_index++;
715 }
716 env->cpu_index = cpu_index;
268a362c 717 env->numa_node = 0;
72cf2d4f
BS
718 QTAILQ_INIT(&env->breakpoints);
719 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
720#ifndef CONFIG_USER_ONLY
721 env->thread_id = qemu_get_thread_id();
722#endif
6a00d601 723 *penv = env;
c2764719
PB
724#if defined(CONFIG_USER_ONLY)
725 cpu_list_unlock();
726#endif
b3c7724c 727#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
728 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
729 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
730 cpu_save, cpu_load, env);
731#endif
fd6ce8f6
FB
732}
733
d1a1eb74
TG
734/* Allocate a new translation block. Flush the translation buffer if
735 too many translation blocks or too much generated code. */
736static TranslationBlock *tb_alloc(target_ulong pc)
737{
738 TranslationBlock *tb;
739
740 if (nb_tbs >= code_gen_max_blocks ||
741 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
742 return NULL;
743 tb = &tbs[nb_tbs++];
744 tb->pc = pc;
745 tb->cflags = 0;
746 return tb;
747}
748
749void tb_free(TranslationBlock *tb)
750{
751 /* In practice this is mostly used for single use temporary TB
752 Ignore the hard cases and just back up if this TB happens to
753 be the last one generated. */
754 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
755 code_gen_ptr = tb->tc_ptr;
756 nb_tbs--;
757 }
758}
759
9fa3e853
FB
760static inline void invalidate_page_bitmap(PageDesc *p)
761{
762 if (p->code_bitmap) {
7267c094 763 g_free(p->code_bitmap);
9fa3e853
FB
764 p->code_bitmap = NULL;
765 }
766 p->code_write_count = 0;
767}
768
5cd2c5b6
RH
769/* Set to NULL all the 'first_tb' fields in all PageDescs. */
770
771static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 772{
5cd2c5b6 773 int i;
fd6ce8f6 774
5cd2c5b6
RH
775 if (*lp == NULL) {
776 return;
777 }
778 if (level == 0) {
779 PageDesc *pd = *lp;
7296abac 780 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
781 pd[i].first_tb = NULL;
782 invalidate_page_bitmap(pd + i);
fd6ce8f6 783 }
5cd2c5b6
RH
784 } else {
785 void **pp = *lp;
7296abac 786 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
787 page_flush_tb_1 (level - 1, pp + i);
788 }
789 }
790}
791
792static void page_flush_tb(void)
793{
794 int i;
795 for (i = 0; i < V_L1_SIZE; i++) {
796 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
797 }
798}
799
800/* flush all the translation blocks */
d4e8164f 801/* XXX: tb_flush is currently not thread safe */
9349b4f9 802void tb_flush(CPUArchState *env1)
fd6ce8f6 803{
9349b4f9 804 CPUArchState *env;
0124311e 805#if defined(DEBUG_FLUSH)
ab3d1727
BS
806 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
807 (unsigned long)(code_gen_ptr - code_gen_buffer),
808 nb_tbs, nb_tbs > 0 ?
809 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 810#endif
26a5f13b 811 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
812 cpu_abort(env1, "Internal error: code buffer overflow\n");
813
fd6ce8f6 814 nb_tbs = 0;
3b46e624 815
6a00d601
FB
816 for(env = first_cpu; env != NULL; env = env->next_cpu) {
817 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
818 }
9fa3e853 819
8a8a608f 820 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 821 page_flush_tb();
9fa3e853 822
fd6ce8f6 823 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
824 /* XXX: flush processor icache at this point if cache flush is
825 expensive */
e3db7226 826 tb_flush_count++;
fd6ce8f6
FB
827}
828
829#ifdef DEBUG_TB_CHECK
830
bc98a7ef 831static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
832{
833 TranslationBlock *tb;
834 int i;
835 address &= TARGET_PAGE_MASK;
99773bd4
PB
836 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
837 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
838 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
839 address >= tb->pc + tb->size)) {
0bf9e31a
BS
840 printf("ERROR invalidate: address=" TARGET_FMT_lx
841 " PC=%08lx size=%04x\n",
99773bd4 842 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
843 }
844 }
845 }
846}
847
848/* verify that all the pages have correct rights for code */
849static void tb_page_check(void)
850{
851 TranslationBlock *tb;
852 int i, flags1, flags2;
3b46e624 853
99773bd4
PB
854 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
855 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
856 flags1 = page_get_flags(tb->pc);
857 flags2 = page_get_flags(tb->pc + tb->size - 1);
858 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
859 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 860 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
861 }
862 }
863 }
864}
865
866#endif
867
868/* invalidate one TB */
869static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
870 int next_offset)
871{
872 TranslationBlock *tb1;
873 for(;;) {
874 tb1 = *ptb;
875 if (tb1 == tb) {
876 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
877 break;
878 }
879 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
880 }
881}
882
9fa3e853
FB
883static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
884{
885 TranslationBlock *tb1;
886 unsigned int n1;
887
888 for(;;) {
889 tb1 = *ptb;
890 n1 = (long)tb1 & 3;
891 tb1 = (TranslationBlock *)((long)tb1 & ~3);
892 if (tb1 == tb) {
893 *ptb = tb1->page_next[n1];
894 break;
895 }
896 ptb = &tb1->page_next[n1];
897 }
898}
899
d4e8164f
FB
900static inline void tb_jmp_remove(TranslationBlock *tb, int n)
901{
902 TranslationBlock *tb1, **ptb;
903 unsigned int n1;
904
905 ptb = &tb->jmp_next[n];
906 tb1 = *ptb;
907 if (tb1) {
908 /* find tb(n) in circular list */
909 for(;;) {
910 tb1 = *ptb;
911 n1 = (long)tb1 & 3;
912 tb1 = (TranslationBlock *)((long)tb1 & ~3);
913 if (n1 == n && tb1 == tb)
914 break;
915 if (n1 == 2) {
916 ptb = &tb1->jmp_first;
917 } else {
918 ptb = &tb1->jmp_next[n1];
919 }
920 }
921 /* now we can suppress tb(n) from the list */
922 *ptb = tb->jmp_next[n];
923
924 tb->jmp_next[n] = NULL;
925 }
926}
927
928/* reset the jump entry 'n' of a TB so that it is not chained to
929 another TB */
930static inline void tb_reset_jump(TranslationBlock *tb, int n)
931{
932 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
933}
934
41c1b1c9 935void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 936{
9349b4f9 937 CPUArchState *env;
8a40a180 938 PageDesc *p;
d4e8164f 939 unsigned int h, n1;
41c1b1c9 940 tb_page_addr_t phys_pc;
8a40a180 941 TranslationBlock *tb1, *tb2;
3b46e624 942
8a40a180
FB
943 /* remove the TB from the hash list */
944 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
945 h = tb_phys_hash_func(phys_pc);
5fafdf24 946 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
947 offsetof(TranslationBlock, phys_hash_next));
948
949 /* remove the TB from the page list */
950 if (tb->page_addr[0] != page_addr) {
951 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
952 tb_page_remove(&p->first_tb, tb);
953 invalidate_page_bitmap(p);
954 }
955 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
956 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
957 tb_page_remove(&p->first_tb, tb);
958 invalidate_page_bitmap(p);
959 }
960
36bdbe54 961 tb_invalidated_flag = 1;
59817ccb 962
fd6ce8f6 963 /* remove the TB from the hash list */
8a40a180 964 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
965 for(env = first_cpu; env != NULL; env = env->next_cpu) {
966 if (env->tb_jmp_cache[h] == tb)
967 env->tb_jmp_cache[h] = NULL;
968 }
d4e8164f
FB
969
970 /* suppress this TB from the two jump lists */
971 tb_jmp_remove(tb, 0);
972 tb_jmp_remove(tb, 1);
973
974 /* suppress any remaining jumps to this TB */
975 tb1 = tb->jmp_first;
976 for(;;) {
977 n1 = (long)tb1 & 3;
978 if (n1 == 2)
979 break;
980 tb1 = (TranslationBlock *)((long)tb1 & ~3);
981 tb2 = tb1->jmp_next[n1];
982 tb_reset_jump(tb1, n1);
983 tb1->jmp_next[n1] = NULL;
984 tb1 = tb2;
985 }
986 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 987
e3db7226 988 tb_phys_invalidate_count++;
9fa3e853
FB
989}
990
991static inline void set_bits(uint8_t *tab, int start, int len)
992{
993 int end, mask, end1;
994
995 end = start + len;
996 tab += start >> 3;
997 mask = 0xff << (start & 7);
998 if ((start & ~7) == (end & ~7)) {
999 if (start < end) {
1000 mask &= ~(0xff << (end & 7));
1001 *tab |= mask;
1002 }
1003 } else {
1004 *tab++ |= mask;
1005 start = (start + 8) & ~7;
1006 end1 = end & ~7;
1007 while (start < end1) {
1008 *tab++ = 0xff;
1009 start += 8;
1010 }
1011 if (start < end) {
1012 mask = ~(0xff << (end & 7));
1013 *tab |= mask;
1014 }
1015 }
1016}
1017
1018static void build_page_bitmap(PageDesc *p)
1019{
1020 int n, tb_start, tb_end;
1021 TranslationBlock *tb;
3b46e624 1022
7267c094 1023 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
1024
1025 tb = p->first_tb;
1026 while (tb != NULL) {
1027 n = (long)tb & 3;
1028 tb = (TranslationBlock *)((long)tb & ~3);
1029 /* NOTE: this is subtle as a TB may span two physical pages */
1030 if (n == 0) {
1031 /* NOTE: tb_end may be after the end of the page, but
1032 it is not a problem */
1033 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1034 tb_end = tb_start + tb->size;
1035 if (tb_end > TARGET_PAGE_SIZE)
1036 tb_end = TARGET_PAGE_SIZE;
1037 } else {
1038 tb_start = 0;
1039 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1040 }
1041 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1042 tb = tb->page_next[n];
1043 }
1044}
1045
9349b4f9 1046TranslationBlock *tb_gen_code(CPUArchState *env,
2e70f6ef
PB
1047 target_ulong pc, target_ulong cs_base,
1048 int flags, int cflags)
d720b93d
FB
1049{
1050 TranslationBlock *tb;
1051 uint8_t *tc_ptr;
41c1b1c9
PB
1052 tb_page_addr_t phys_pc, phys_page2;
1053 target_ulong virt_page2;
d720b93d
FB
1054 int code_gen_size;
1055
41c1b1c9 1056 phys_pc = get_page_addr_code(env, pc);
c27004ec 1057 tb = tb_alloc(pc);
d720b93d
FB
1058 if (!tb) {
1059 /* flush must be done */
1060 tb_flush(env);
1061 /* cannot fail at this point */
c27004ec 1062 tb = tb_alloc(pc);
2e70f6ef
PB
1063 /* Don't forget to invalidate previous TB info. */
1064 tb_invalidated_flag = 1;
d720b93d
FB
1065 }
1066 tc_ptr = code_gen_ptr;
1067 tb->tc_ptr = tc_ptr;
1068 tb->cs_base = cs_base;
1069 tb->flags = flags;
1070 tb->cflags = cflags;
d07bde88 1071 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1072 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1073
d720b93d 1074 /* check next page if needed */
c27004ec 1075 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1076 phys_page2 = -1;
c27004ec 1077 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1078 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1079 }
41c1b1c9 1080 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1081 return tb;
d720b93d 1082}
3b46e624 1083
9fa3e853
FB
1084/* invalidate all TBs which intersect with the target physical page
1085 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1086 the same physical page. 'is_cpu_write_access' should be true if called
1087 from a real cpu write access: the virtual CPU will exit the current
1088 TB if code is modified inside this TB. */
41c1b1c9 1089void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1090 int is_cpu_write_access)
1091{
6b917547 1092 TranslationBlock *tb, *tb_next, *saved_tb;
9349b4f9 1093 CPUArchState *env = cpu_single_env;
41c1b1c9 1094 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1095 PageDesc *p;
1096 int n;
1097#ifdef TARGET_HAS_PRECISE_SMC
1098 int current_tb_not_found = is_cpu_write_access;
1099 TranslationBlock *current_tb = NULL;
1100 int current_tb_modified = 0;
1101 target_ulong current_pc = 0;
1102 target_ulong current_cs_base = 0;
1103 int current_flags = 0;
1104#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1105
1106 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1107 if (!p)
9fa3e853 1108 return;
5fafdf24 1109 if (!p->code_bitmap &&
d720b93d
FB
1110 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1111 is_cpu_write_access) {
9fa3e853
FB
1112 /* build code bitmap */
1113 build_page_bitmap(p);
1114 }
1115
1116 /* we remove all the TBs in the range [start, end[ */
1117 /* XXX: see if in some cases it could be faster to invalidate all the code */
1118 tb = p->first_tb;
1119 while (tb != NULL) {
1120 n = (long)tb & 3;
1121 tb = (TranslationBlock *)((long)tb & ~3);
1122 tb_next = tb->page_next[n];
1123 /* NOTE: this is subtle as a TB may span two physical pages */
1124 if (n == 0) {
1125 /* NOTE: tb_end may be after the end of the page, but
1126 it is not a problem */
1127 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1128 tb_end = tb_start + tb->size;
1129 } else {
1130 tb_start = tb->page_addr[1];
1131 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1132 }
1133 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1134#ifdef TARGET_HAS_PRECISE_SMC
1135 if (current_tb_not_found) {
1136 current_tb_not_found = 0;
1137 current_tb = NULL;
2e70f6ef 1138 if (env->mem_io_pc) {
d720b93d 1139 /* now we have a real cpu fault */
2e70f6ef 1140 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1141 }
1142 }
1143 if (current_tb == tb &&
2e70f6ef 1144 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1145 /* If we are modifying the current TB, we must stop
1146 its execution. We could be more precise by checking
1147 that the modification is after the current PC, but it
1148 would require a specialized function to partially
1149 restore the CPU state */
3b46e624 1150
d720b93d 1151 current_tb_modified = 1;
618ba8e6 1152 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1153 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1154 &current_flags);
d720b93d
FB
1155 }
1156#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1157 /* we need to do that to handle the case where a signal
1158 occurs while doing tb_phys_invalidate() */
1159 saved_tb = NULL;
1160 if (env) {
1161 saved_tb = env->current_tb;
1162 env->current_tb = NULL;
1163 }
9fa3e853 1164 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1165 if (env) {
1166 env->current_tb = saved_tb;
1167 if (env->interrupt_request && env->current_tb)
1168 cpu_interrupt(env, env->interrupt_request);
1169 }
9fa3e853
FB
1170 }
1171 tb = tb_next;
1172 }
1173#if !defined(CONFIG_USER_ONLY)
1174 /* if no code remaining, no need to continue to use slow writes */
1175 if (!p->first_tb) {
1176 invalidate_page_bitmap(p);
d720b93d 1177 if (is_cpu_write_access) {
2e70f6ef 1178 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1179 }
1180 }
1181#endif
1182#ifdef TARGET_HAS_PRECISE_SMC
1183 if (current_tb_modified) {
1184 /* we generate a block containing just the instruction
1185 modifying the memory. It will ensure that it cannot modify
1186 itself */
ea1c1802 1187 env->current_tb = NULL;
2e70f6ef 1188 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1189 cpu_resume_from_signal(env, NULL);
9fa3e853 1190 }
fd6ce8f6 1191#endif
9fa3e853 1192}
fd6ce8f6 1193
9fa3e853 1194/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1195static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1196{
1197 PageDesc *p;
1198 int offset, b;
59817ccb 1199#if 0
a4193c8a 1200 if (1) {
93fcfe39
AL
1201 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1202 cpu_single_env->mem_io_vaddr, len,
1203 cpu_single_env->eip,
1204 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1205 }
1206#endif
9fa3e853 1207 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1208 if (!p)
9fa3e853
FB
1209 return;
1210 if (p->code_bitmap) {
1211 offset = start & ~TARGET_PAGE_MASK;
1212 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1213 if (b & ((1 << len) - 1))
1214 goto do_invalidate;
1215 } else {
1216 do_invalidate:
d720b93d 1217 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1218 }
1219}
1220
9fa3e853 1221#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1222static void tb_invalidate_phys_page(tb_page_addr_t addr,
20503968 1223 uintptr_t pc, void *puc)
9fa3e853 1224{
6b917547 1225 TranslationBlock *tb;
9fa3e853 1226 PageDesc *p;
6b917547 1227 int n;
d720b93d 1228#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1229 TranslationBlock *current_tb = NULL;
9349b4f9 1230 CPUArchState *env = cpu_single_env;
6b917547
AL
1231 int current_tb_modified = 0;
1232 target_ulong current_pc = 0;
1233 target_ulong current_cs_base = 0;
1234 int current_flags = 0;
d720b93d 1235#endif
9fa3e853
FB
1236
1237 addr &= TARGET_PAGE_MASK;
1238 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1239 if (!p)
9fa3e853
FB
1240 return;
1241 tb = p->first_tb;
d720b93d
FB
1242#ifdef TARGET_HAS_PRECISE_SMC
1243 if (tb && pc != 0) {
1244 current_tb = tb_find_pc(pc);
1245 }
1246#endif
9fa3e853
FB
1247 while (tb != NULL) {
1248 n = (long)tb & 3;
1249 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1250#ifdef TARGET_HAS_PRECISE_SMC
1251 if (current_tb == tb &&
2e70f6ef 1252 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1253 /* If we are modifying the current TB, we must stop
1254 its execution. We could be more precise by checking
1255 that the modification is after the current PC, but it
1256 would require a specialized function to partially
1257 restore the CPU state */
3b46e624 1258
d720b93d 1259 current_tb_modified = 1;
618ba8e6 1260 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1261 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1262 &current_flags);
d720b93d
FB
1263 }
1264#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1265 tb_phys_invalidate(tb, addr);
1266 tb = tb->page_next[n];
1267 }
fd6ce8f6 1268 p->first_tb = NULL;
d720b93d
FB
1269#ifdef TARGET_HAS_PRECISE_SMC
1270 if (current_tb_modified) {
1271 /* we generate a block containing just the instruction
1272 modifying the memory. It will ensure that it cannot modify
1273 itself */
ea1c1802 1274 env->current_tb = NULL;
2e70f6ef 1275 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1276 cpu_resume_from_signal(env, puc);
1277 }
1278#endif
fd6ce8f6 1279}
9fa3e853 1280#endif
fd6ce8f6
FB
1281
1282/* add the tb in the target page and protect it if necessary */
5fafdf24 1283static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1284 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1285{
1286 PageDesc *p;
4429ab44
JQ
1287#ifndef CONFIG_USER_ONLY
1288 bool page_already_protected;
1289#endif
9fa3e853
FB
1290
1291 tb->page_addr[n] = page_addr;
5cd2c5b6 1292 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1293 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1294#ifndef CONFIG_USER_ONLY
1295 page_already_protected = p->first_tb != NULL;
1296#endif
9fa3e853
FB
1297 p->first_tb = (TranslationBlock *)((long)tb | n);
1298 invalidate_page_bitmap(p);
fd6ce8f6 1299
107db443 1300#if defined(TARGET_HAS_SMC) || 1
d720b93d 1301
9fa3e853 1302#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1303 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1304 target_ulong addr;
1305 PageDesc *p2;
9fa3e853
FB
1306 int prot;
1307
fd6ce8f6
FB
1308 /* force the host page as non writable (writes will have a
1309 page fault + mprotect overhead) */
53a5960a 1310 page_addr &= qemu_host_page_mask;
fd6ce8f6 1311 prot = 0;
53a5960a
PB
1312 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1313 addr += TARGET_PAGE_SIZE) {
1314
1315 p2 = page_find (addr >> TARGET_PAGE_BITS);
1316 if (!p2)
1317 continue;
1318 prot |= p2->flags;
1319 p2->flags &= ~PAGE_WRITE;
53a5960a 1320 }
5fafdf24 1321 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1322 (prot & PAGE_BITS) & ~PAGE_WRITE);
1323#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1324 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1325 page_addr);
fd6ce8f6 1326#endif
fd6ce8f6 1327 }
9fa3e853
FB
1328#else
1329 /* if some code is already present, then the pages are already
1330 protected. So we handle the case where only the first TB is
1331 allocated in a physical page */
4429ab44 1332 if (!page_already_protected) {
6a00d601 1333 tlb_protect_code(page_addr);
9fa3e853
FB
1334 }
1335#endif
d720b93d
FB
1336
1337#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1338}
1339
9fa3e853
FB
1340/* add a new TB and link it to the physical page tables. phys_page2 is
1341 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1342void tb_link_page(TranslationBlock *tb,
1343 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1344{
9fa3e853
FB
1345 unsigned int h;
1346 TranslationBlock **ptb;
1347
c8a706fe
PB
1348 /* Grab the mmap lock to stop another thread invalidating this TB
1349 before we are done. */
1350 mmap_lock();
9fa3e853
FB
1351 /* add in the physical hash table */
1352 h = tb_phys_hash_func(phys_pc);
1353 ptb = &tb_phys_hash[h];
1354 tb->phys_hash_next = *ptb;
1355 *ptb = tb;
fd6ce8f6
FB
1356
1357 /* add in the page list */
9fa3e853
FB
1358 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1359 if (phys_page2 != -1)
1360 tb_alloc_page(tb, 1, phys_page2);
1361 else
1362 tb->page_addr[1] = -1;
9fa3e853 1363
d4e8164f
FB
1364 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1365 tb->jmp_next[0] = NULL;
1366 tb->jmp_next[1] = NULL;
1367
1368 /* init original jump addresses */
1369 if (tb->tb_next_offset[0] != 0xffff)
1370 tb_reset_jump(tb, 0);
1371 if (tb->tb_next_offset[1] != 0xffff)
1372 tb_reset_jump(tb, 1);
8a40a180
FB
1373
1374#ifdef DEBUG_TB_CHECK
1375 tb_page_check();
1376#endif
c8a706fe 1377 mmap_unlock();
fd6ce8f6
FB
1378}
1379
9fa3e853
FB
1380/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1381 tb[1].tc_ptr. Return NULL if not found */
6375e09e 1382TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
fd6ce8f6 1383{
9fa3e853
FB
1384 int m_min, m_max, m;
1385 unsigned long v;
1386 TranslationBlock *tb;
a513fe19
FB
1387
1388 if (nb_tbs <= 0)
1389 return NULL;
1390 if (tc_ptr < (unsigned long)code_gen_buffer ||
1391 tc_ptr >= (unsigned long)code_gen_ptr)
1392 return NULL;
1393 /* binary search (cf Knuth) */
1394 m_min = 0;
1395 m_max = nb_tbs - 1;
1396 while (m_min <= m_max) {
1397 m = (m_min + m_max) >> 1;
1398 tb = &tbs[m];
1399 v = (unsigned long)tb->tc_ptr;
1400 if (v == tc_ptr)
1401 return tb;
1402 else if (tc_ptr < v) {
1403 m_max = m - 1;
1404 } else {
1405 m_min = m + 1;
1406 }
5fafdf24 1407 }
a513fe19
FB
1408 return &tbs[m_max];
1409}
7501267e 1410
ea041c0e
FB
1411static void tb_reset_jump_recursive(TranslationBlock *tb);
1412
1413static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1414{
1415 TranslationBlock *tb1, *tb_next, **ptb;
1416 unsigned int n1;
1417
1418 tb1 = tb->jmp_next[n];
1419 if (tb1 != NULL) {
1420 /* find head of list */
1421 for(;;) {
1422 n1 = (long)tb1 & 3;
1423 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1424 if (n1 == 2)
1425 break;
1426 tb1 = tb1->jmp_next[n1];
1427 }
1428 /* we are now sure now that tb jumps to tb1 */
1429 tb_next = tb1;
1430
1431 /* remove tb from the jmp_first list */
1432 ptb = &tb_next->jmp_first;
1433 for(;;) {
1434 tb1 = *ptb;
1435 n1 = (long)tb1 & 3;
1436 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1437 if (n1 == n && tb1 == tb)
1438 break;
1439 ptb = &tb1->jmp_next[n1];
1440 }
1441 *ptb = tb->jmp_next[n];
1442 tb->jmp_next[n] = NULL;
3b46e624 1443
ea041c0e
FB
1444 /* suppress the jump to next tb in generated code */
1445 tb_reset_jump(tb, n);
1446
0124311e 1447 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1448 tb_reset_jump_recursive(tb_next);
1449 }
1450}
1451
1452static void tb_reset_jump_recursive(TranslationBlock *tb)
1453{
1454 tb_reset_jump_recursive2(tb, 0);
1455 tb_reset_jump_recursive2(tb, 1);
1456}
1457
1fddef4b 1458#if defined(TARGET_HAS_ICE)
94df27fd 1459#if defined(CONFIG_USER_ONLY)
9349b4f9 1460static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
1461{
1462 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1463}
1464#else
1e7855a5 1465void tb_invalidate_phys_addr(target_phys_addr_t addr)
d720b93d 1466{
c227f099 1467 ram_addr_t ram_addr;
f3705d53 1468 MemoryRegionSection *section;
d720b93d 1469
06ef3525 1470 section = phys_page_find(addr >> TARGET_PAGE_BITS);
f3705d53
AK
1471 if (!(memory_region_is_ram(section->mr)
1472 || (section->mr->rom_device && section->mr->readable))) {
06ef3525
AK
1473 return;
1474 }
f3705d53
AK
1475 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1476 + section_addr(section, addr);
706cd4b5 1477 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1478}
1e7855a5
MF
1479
1480static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1481{
1482 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
1483}
c27004ec 1484#endif
94df27fd 1485#endif /* TARGET_HAS_ICE */
d720b93d 1486
c527ee8f 1487#if defined(CONFIG_USER_ONLY)
9349b4f9 1488void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
1489
1490{
1491}
1492
9349b4f9 1493int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
1494 int flags, CPUWatchpoint **watchpoint)
1495{
1496 return -ENOSYS;
1497}
1498#else
6658ffb8 1499/* Add a watchpoint. */
9349b4f9 1500int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1501 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1502{
b4051334 1503 target_ulong len_mask = ~(len - 1);
c0ce998e 1504 CPUWatchpoint *wp;
6658ffb8 1505
b4051334 1506 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
1507 if ((len & (len - 1)) || (addr & ~len_mask) ||
1508 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
1509 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1510 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1511 return -EINVAL;
1512 }
7267c094 1513 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1514
1515 wp->vaddr = addr;
b4051334 1516 wp->len_mask = len_mask;
a1d1bb31
AL
1517 wp->flags = flags;
1518
2dc9f411 1519 /* keep all GDB-injected watchpoints in front */
c0ce998e 1520 if (flags & BP_GDB)
72cf2d4f 1521 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1522 else
72cf2d4f 1523 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1524
6658ffb8 1525 tlb_flush_page(env, addr);
a1d1bb31
AL
1526
1527 if (watchpoint)
1528 *watchpoint = wp;
1529 return 0;
6658ffb8
PB
1530}
1531
a1d1bb31 1532/* Remove a specific watchpoint. */
9349b4f9 1533int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1534 int flags)
6658ffb8 1535{
b4051334 1536 target_ulong len_mask = ~(len - 1);
a1d1bb31 1537 CPUWatchpoint *wp;
6658ffb8 1538
72cf2d4f 1539 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1540 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1541 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1542 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1543 return 0;
1544 }
1545 }
a1d1bb31 1546 return -ENOENT;
6658ffb8
PB
1547}
1548
a1d1bb31 1549/* Remove a specific watchpoint by reference. */
9349b4f9 1550void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 1551{
72cf2d4f 1552 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1553
a1d1bb31
AL
1554 tlb_flush_page(env, watchpoint->vaddr);
1555
7267c094 1556 g_free(watchpoint);
a1d1bb31
AL
1557}
1558
1559/* Remove all matching watchpoints. */
9349b4f9 1560void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 1561{
c0ce998e 1562 CPUWatchpoint *wp, *next;
a1d1bb31 1563
72cf2d4f 1564 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1565 if (wp->flags & mask)
1566 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1567 }
7d03f82f 1568}
c527ee8f 1569#endif
7d03f82f 1570
a1d1bb31 1571/* Add a breakpoint. */
9349b4f9 1572int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 1573 CPUBreakpoint **breakpoint)
4c3a88a2 1574{
1fddef4b 1575#if defined(TARGET_HAS_ICE)
c0ce998e 1576 CPUBreakpoint *bp;
3b46e624 1577
7267c094 1578 bp = g_malloc(sizeof(*bp));
4c3a88a2 1579
a1d1bb31
AL
1580 bp->pc = pc;
1581 bp->flags = flags;
1582
2dc9f411 1583 /* keep all GDB-injected breakpoints in front */
c0ce998e 1584 if (flags & BP_GDB)
72cf2d4f 1585 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1586 else
72cf2d4f 1587 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1588
d720b93d 1589 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1590
1591 if (breakpoint)
1592 *breakpoint = bp;
4c3a88a2
FB
1593 return 0;
1594#else
a1d1bb31 1595 return -ENOSYS;
4c3a88a2
FB
1596#endif
1597}
1598
a1d1bb31 1599/* Remove a specific breakpoint. */
9349b4f9 1600int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 1601{
7d03f82f 1602#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1603 CPUBreakpoint *bp;
1604
72cf2d4f 1605 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1606 if (bp->pc == pc && bp->flags == flags) {
1607 cpu_breakpoint_remove_by_ref(env, bp);
1608 return 0;
1609 }
7d03f82f 1610 }
a1d1bb31
AL
1611 return -ENOENT;
1612#else
1613 return -ENOSYS;
7d03f82f
EI
1614#endif
1615}
1616
a1d1bb31 1617/* Remove a specific breakpoint by reference. */
9349b4f9 1618void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1619{
1fddef4b 1620#if defined(TARGET_HAS_ICE)
72cf2d4f 1621 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1622
a1d1bb31
AL
1623 breakpoint_invalidate(env, breakpoint->pc);
1624
7267c094 1625 g_free(breakpoint);
a1d1bb31
AL
1626#endif
1627}
1628
1629/* Remove all matching breakpoints. */
9349b4f9 1630void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
1631{
1632#if defined(TARGET_HAS_ICE)
c0ce998e 1633 CPUBreakpoint *bp, *next;
a1d1bb31 1634
72cf2d4f 1635 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1636 if (bp->flags & mask)
1637 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1638 }
4c3a88a2
FB
1639#endif
1640}
1641
c33a346e
FB
1642/* enable or disable single step mode. EXCP_DEBUG is returned by the
1643 CPU loop after each instruction */
9349b4f9 1644void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 1645{
1fddef4b 1646#if defined(TARGET_HAS_ICE)
c33a346e
FB
1647 if (env->singlestep_enabled != enabled) {
1648 env->singlestep_enabled = enabled;
e22a25c9
AL
1649 if (kvm_enabled())
1650 kvm_update_guest_debug(env, 0);
1651 else {
ccbb4d44 1652 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1653 /* XXX: only flush what is necessary */
1654 tb_flush(env);
1655 }
c33a346e
FB
1656 }
1657#endif
1658}
1659
34865134
FB
1660/* enable or disable low levels log */
1661void cpu_set_log(int log_flags)
1662{
1663 loglevel = log_flags;
1664 if (loglevel && !logfile) {
11fcfab4 1665 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1666 if (!logfile) {
1667 perror(logfilename);
1668 _exit(1);
1669 }
9fa3e853
FB
1670#if !defined(CONFIG_SOFTMMU)
1671 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1672 {
b55266b5 1673 static char logfile_buf[4096];
9fa3e853
FB
1674 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1675 }
daf767b1
SW
1676#elif defined(_WIN32)
1677 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1678 setvbuf(logfile, NULL, _IONBF, 0);
1679#else
34865134 1680 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1681#endif
e735b91c
PB
1682 log_append = 1;
1683 }
1684 if (!loglevel && logfile) {
1685 fclose(logfile);
1686 logfile = NULL;
34865134
FB
1687 }
1688}
1689
1690void cpu_set_log_filename(const char *filename)
1691{
1692 logfilename = strdup(filename);
e735b91c
PB
1693 if (logfile) {
1694 fclose(logfile);
1695 logfile = NULL;
1696 }
1697 cpu_set_log(loglevel);
34865134 1698}
c33a346e 1699
9349b4f9 1700static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1701{
3098dba0
AJ
1702 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1703 problem and hope the cpu will stop of its own accord. For userspace
1704 emulation this often isn't actually as bad as it sounds. Often
1705 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1706 TranslationBlock *tb;
c227f099 1707 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1708
cab1b4bd 1709 spin_lock(&interrupt_lock);
3098dba0
AJ
1710 tb = env->current_tb;
1711 /* if the cpu is currently executing code, we must unlink it and
1712 all the potentially executing TB */
f76cfe56 1713 if (tb) {
3098dba0
AJ
1714 env->current_tb = NULL;
1715 tb_reset_jump_recursive(tb);
be214e6c 1716 }
cab1b4bd 1717 spin_unlock(&interrupt_lock);
3098dba0
AJ
1718}
1719
97ffbd8d 1720#ifndef CONFIG_USER_ONLY
3098dba0 1721/* mask must never be zero, except for A20 change call */
9349b4f9 1722static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1723{
1724 int old_mask;
be214e6c 1725
2e70f6ef 1726 old_mask = env->interrupt_request;
68a79315 1727 env->interrupt_request |= mask;
3098dba0 1728
8edac960
AL
1729 /*
1730 * If called from iothread context, wake the target cpu in
1731 * case its halted.
1732 */
b7680cb6 1733 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1734 qemu_cpu_kick(env);
1735 return;
1736 }
8edac960 1737
2e70f6ef 1738 if (use_icount) {
266910c4 1739 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1740 if (!can_do_io(env)
be214e6c 1741 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1742 cpu_abort(env, "Raised interrupt while not in I/O function");
1743 }
2e70f6ef 1744 } else {
3098dba0 1745 cpu_unlink_tb(env);
ea041c0e
FB
1746 }
1747}
1748
ec6959d0
JK
1749CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1750
97ffbd8d
JK
1751#else /* CONFIG_USER_ONLY */
1752
9349b4f9 1753void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1754{
1755 env->interrupt_request |= mask;
1756 cpu_unlink_tb(env);
1757}
1758#endif /* CONFIG_USER_ONLY */
1759
9349b4f9 1760void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1761{
1762 env->interrupt_request &= ~mask;
1763}
1764
9349b4f9 1765void cpu_exit(CPUArchState *env)
3098dba0
AJ
1766{
1767 env->exit_request = 1;
1768 cpu_unlink_tb(env);
1769}
1770
c7cd6a37 1771const CPULogItem cpu_log_items[] = {
5fafdf24 1772 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1773 "show generated host assembly code for each compiled TB" },
1774 { CPU_LOG_TB_IN_ASM, "in_asm",
1775 "show target assembly code for each compiled TB" },
5fafdf24 1776 { CPU_LOG_TB_OP, "op",
57fec1fe 1777 "show micro ops for each compiled TB" },
f193c797 1778 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1779 "show micro ops "
1780#ifdef TARGET_I386
1781 "before eflags optimization and "
f193c797 1782#endif
e01a1157 1783 "after liveness analysis" },
f193c797
FB
1784 { CPU_LOG_INT, "int",
1785 "show interrupts/exceptions in short format" },
1786 { CPU_LOG_EXEC, "exec",
1787 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1788 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1789 "show CPU state before block translation" },
f193c797
FB
1790#ifdef TARGET_I386
1791 { CPU_LOG_PCALL, "pcall",
1792 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1793 { CPU_LOG_RESET, "cpu_reset",
1794 "show CPU state before CPU resets" },
f193c797 1795#endif
8e3a9fd2 1796#ifdef DEBUG_IOPORT
fd872598
FB
1797 { CPU_LOG_IOPORT, "ioport",
1798 "show all i/o ports accesses" },
8e3a9fd2 1799#endif
f193c797
FB
1800 { 0, NULL, NULL },
1801};
1802
1803static int cmp1(const char *s1, int n, const char *s2)
1804{
1805 if (strlen(s2) != n)
1806 return 0;
1807 return memcmp(s1, s2, n) == 0;
1808}
3b46e624 1809
f193c797
FB
1810/* takes a comma separated list of log masks. Return 0 if error. */
1811int cpu_str_to_log_mask(const char *str)
1812{
c7cd6a37 1813 const CPULogItem *item;
f193c797
FB
1814 int mask;
1815 const char *p, *p1;
1816
1817 p = str;
1818 mask = 0;
1819 for(;;) {
1820 p1 = strchr(p, ',');
1821 if (!p1)
1822 p1 = p + strlen(p);
9742bf26
YT
1823 if(cmp1(p,p1-p,"all")) {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 mask |= item->mask;
1826 }
1827 } else {
1828 for(item = cpu_log_items; item->mask != 0; item++) {
1829 if (cmp1(p, p1 - p, item->name))
1830 goto found;
1831 }
1832 return 0;
f193c797 1833 }
f193c797
FB
1834 found:
1835 mask |= item->mask;
1836 if (*p1 != ',')
1837 break;
1838 p = p1 + 1;
1839 }
1840 return mask;
1841}
ea041c0e 1842
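/* Illustrative usage sketch -- not part of the original exec.c. It shows how
 * cpu_str_to_log_mask() and cpu_set_log() above are meant to be combined,
 * the same way a command-line option such as "-d in_asm,exec" is handled.
 * The function name is an example only. */
#if 0 /* example only */
static void example_enable_logging(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);   /* e.g. arg = "in_asm,exec" */
    if (mask == 0) {
        fprintf(stderr, "unknown log category in '%s'\n", arg);
    } else {
        cpu_set_log(mask);                 /* opens the log file, stores the mask */
    }
}
#endif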
9349b4f9 1843void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1844{
1845 va_list ap;
493ae1f0 1846 va_list ap2;
7501267e
FB
1847
1848 va_start(ap, fmt);
493ae1f0 1849 va_copy(ap2, ap);
7501267e
FB
1850 fprintf(stderr, "qemu: fatal: ");
1851 vfprintf(stderr, fmt, ap);
1852 fprintf(stderr, "\n");
1853#ifdef TARGET_I386
7fe48483
FB
1854 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1855#else
1856 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1857#endif
93fcfe39
AL
1858 if (qemu_log_enabled()) {
1859 qemu_log("qemu: fatal: ");
1860 qemu_log_vprintf(fmt, ap2);
1861 qemu_log("\n");
f9373291 1862#ifdef TARGET_I386
93fcfe39 1863 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1864#else
93fcfe39 1865 log_cpu_state(env, 0);
f9373291 1866#endif
31b1a7b4 1867 qemu_log_flush();
93fcfe39 1868 qemu_log_close();
924edcae 1869 }
493ae1f0 1870 va_end(ap2);
f9373291 1871 va_end(ap);
fd052bf6
RV
1872#if defined(CONFIG_USER_ONLY)
1873 {
1874 struct sigaction act;
1875 sigfillset(&act.sa_mask);
1876 act.sa_handler = SIG_DFL;
1877 sigaction(SIGABRT, &act, NULL);
1878 }
1879#endif
7501267e
FB
1880 abort();
1881}
1882
9349b4f9 1883CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1884{
9349b4f9
AF
1885 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1886 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1887 int cpu_index = new_env->cpu_index;
5a38f081
AL
1888#if defined(TARGET_HAS_ICE)
1889 CPUBreakpoint *bp;
1890 CPUWatchpoint *wp;
1891#endif
1892
9349b4f9 1893 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1894
1895 /* Preserve chaining and index. */
c5be9f08
TS
1896 new_env->next_cpu = next_cpu;
1897 new_env->cpu_index = cpu_index;
5a38f081
AL
1898
1899 /* Clone all break/watchpoints.
1900 Note: Once we support ptrace with hw-debug register access, make sure
1901 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1902 QTAILQ_INIT(&env->breakpoints);
1903 QTAILQ_INIT(&env->watchpoints);
5a38f081 1904#if defined(TARGET_HAS_ICE)
72cf2d4f 1905 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1906 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1907 }
72cf2d4f 1908 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1909 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1910 wp->flags, NULL);
1911 }
1912#endif
1913
c5be9f08
TS
1914 return new_env;
1915}
1916
0124311e
FB
1917#if !defined(CONFIG_USER_ONLY)
1918
9349b4f9 1919static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1920{
1921 unsigned int i;
1922
1923 /* Discard jump cache entries for any tb which might potentially
1924 overlap the flushed page. */
1925 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1926 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1927 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1928
1929 i = tb_jmp_cache_hash_page(addr);
1930 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1931 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1932}
1933
08738984
IK
1934static CPUTLBEntry s_cputlb_empty_entry = {
1935 .addr_read = -1,
1936 .addr_write = -1,
1937 .addr_code = -1,
1938 .addend = -1,
1939};
1940
771124e1
PM
1941/* NOTE:
1942 * If flush_global is true (the usual case), flush all tlb entries.
1943 * If flush_global is false, flush (at least) all tlb entries not
1944 * marked global.
1945 *
1946 * Since QEMU doesn't currently implement a global/not-global flag
1947 * for tlb entries, at the moment tlb_flush() will also flush all
1948 * tlb entries in the flush_global == false case. This is OK because
1949 * CPU architectures generally permit an implementation to drop
1950 * entries from the TLB at any time, so flushing more entries than
1951 * required is only an efficiency issue, not a correctness issue.
1952 */
9349b4f9 1953void tlb_flush(CPUArchState *env, int flush_global)
33417e70 1954{
33417e70 1955 int i;
0124311e 1956
9fa3e853
FB
1957#if defined(DEBUG_TLB)
1958 printf("tlb_flush:\n");
1959#endif
0124311e
FB
1960 /* must reset current TB so that interrupts cannot modify the
1961 links while we are modifying them */
1962 env->current_tb = NULL;
1963
33417e70 1964 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1965 int mmu_idx;
1966 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1967 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1968 }
33417e70 1969 }
9fa3e853 1970
8a40a180 1971 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1972
d4c430a8
PB
1973 env->tlb_flush_addr = -1;
1974 env->tlb_flush_mask = 0;
e3db7226 1975 tlb_flush_count++;
33417e70
FB
1976}
1977
274da6b2 1978static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1979{
5fafdf24 1980 if (addr == (tlb_entry->addr_read &
84b7b8e7 1981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1982 addr == (tlb_entry->addr_write &
84b7b8e7 1983 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1984 addr == (tlb_entry->addr_code &
84b7b8e7 1985 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1986 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1987 }
61382a50
FB
1988}
1989
9349b4f9 1990void tlb_flush_page(CPUArchState *env, target_ulong addr)
33417e70 1991{
8a40a180 1992 int i;
cfde4bd9 1993 int mmu_idx;
0124311e 1994
9fa3e853 1995#if defined(DEBUG_TLB)
108c49b8 1996 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1997#endif
d4c430a8
PB
1998 /* Check if we need to flush due to large pages. */
1999 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2000#if defined(DEBUG_TLB)
2001 printf("tlb_flush_page: forced full flush ("
2002 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2003 env->tlb_flush_addr, env->tlb_flush_mask);
2004#endif
2005 tlb_flush(env, 1);
2006 return;
2007 }
0124311e
FB
2008 /* must reset current TB so that interrupts cannot modify the
2009 links while we are modifying them */
2010 env->current_tb = NULL;
61382a50
FB
2011
2012 addr &= TARGET_PAGE_MASK;
2013 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2014 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2015 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2016
5c751e99 2017 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2018}
2019
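/* Illustrative sketch -- not part of the original exec.c. The per-CPU TLB is
 * a direct-mapped table, so a given virtual page can only ever occupy one
 * slot per MMU mode, which is why tlb_flush_page() above only has to clear
 * that single slot in each mode. With example values TARGET_PAGE_BITS = 12
 * and CPU_TLB_SIZE = 256 (both are target-dependent), the slot index is: */
#if 0 /* example only */
static unsigned example_tlb_slot(unsigned long vaddr)
{
    return (vaddr >> 12) & (256 - 1);      /* e.g. 0x1234f000 maps to slot 0x4f */
}
#endif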
9fa3e853
FB
2020/* update the TLBs so that writes to code in the virtual page 'addr'
2021 can be detected */
c227f099 2022static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2023{
5fafdf24 2024 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2025 ram_addr + TARGET_PAGE_SIZE,
2026 CODE_DIRTY_FLAG);
9fa3e853
FB
2027}
2028
9fa3e853 2029/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2030 tested for self modifying code */
9349b4f9 2031static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 2032 target_ulong vaddr)
9fa3e853 2033{
f7c11b53 2034 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2035}
2036
7859cc6e
AK
2037static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2038{
2039 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2040}
2041
5fafdf24 2042static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2043 unsigned long start, unsigned long length)
2044{
2045 unsigned long addr;
7859cc6e 2046 if (tlb_is_dirty_ram(tlb_entry)) {
84b7b8e7 2047 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2048 if ((addr - start) < length) {
7859cc6e 2049 tlb_entry->addr_write |= TLB_NOTDIRTY;
1ccde1cb
FB
2050 }
2051 }
2052}
2053
5579c7f3 2054/* Note: start and end must be within the same ram block. */
c227f099 2055void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2056 int dirty_flags)
1ccde1cb 2057{
9349b4f9 2058 CPUArchState *env;
4f2ac237 2059 unsigned long length, start1;
f7c11b53 2060 int i;
1ccde1cb
FB
2061
2062 start &= TARGET_PAGE_MASK;
2063 end = TARGET_PAGE_ALIGN(end);
2064
2065 length = end - start;
2066 if (length == 0)
2067 return;
f7c11b53 2068 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2069
1ccde1cb
FB
2070 /* we modify the TLB cache so that the dirty bit will be set again
2071 when accessing the range */
b2e0a138 2072 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2073 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2074 address comparisons below. */
b2e0a138 2075 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2076 != (end - 1) - start) {
2077 abort();
2078 }
2079
6a00d601 2080 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2081 int mmu_idx;
2082 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2083 for(i = 0; i < CPU_TLB_SIZE; i++)
2084 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2085 start1, length);
2086 }
6a00d601 2087 }
1ccde1cb
FB
2088}
2089
74576198
AL
2090int cpu_physical_memory_set_dirty_tracking(int enable)
2091{
f6f3fbca 2092 int ret = 0;
74576198 2093 in_migration = enable;
f6f3fbca 2094 return ret;
74576198
AL
2095}
2096
3a7d929e
FB
2097static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2098{
c227f099 2099 ram_addr_t ram_addr;
5579c7f3 2100 void *p;
3a7d929e 2101
7859cc6e 2102 if (tlb_is_dirty_ram(tlb_entry)) {
5579c7f3
PB
2103 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2104 + tlb_entry->addend);
e890261f 2105 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2106 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2107 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2108 }
2109 }
2110}
2111
2112/* update the TLB according to the current state of the dirty bits */
9349b4f9 2113void cpu_tlb_update_dirty(CPUArchState *env)
3a7d929e
FB
2114{
2115 int i;
cfde4bd9
IY
2116 int mmu_idx;
2117 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2118 for(i = 0; i < CPU_TLB_SIZE; i++)
2119 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2120 }
3a7d929e
FB
2121}
2122
0f459d16 2123static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2124{
0f459d16
PB
2125 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2126 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2127}
2128
0f459d16
PB
2129/* update the TLB corresponding to virtual page vaddr
2130 so that it is no longer dirty */
9349b4f9 2131static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
1ccde1cb 2132{
1ccde1cb 2133 int i;
cfde4bd9 2134 int mmu_idx;
1ccde1cb 2135
0f459d16 2136 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2137 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2138 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2139 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2140}
2141
d4c430a8
PB
2142/* Our TLB does not support large pages, so remember the area covered by
2143 large pages and trigger a full TLB flush if these are invalidated. */
9349b4f9 2144static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2145 target_ulong size)
2146{
2147 target_ulong mask = ~(size - 1);
2148
2149 if (env->tlb_flush_addr == (target_ulong)-1) {
2150 env->tlb_flush_addr = vaddr & mask;
2151 env->tlb_flush_mask = mask;
2152 return;
2153 }
2154 /* Extend the existing region to include the new page.
2155 This is a compromise between unnecessary flushes and the cost
2156 of maintaining a full variable size TLB. */
2157 mask &= env->tlb_flush_mask;
2158 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2159 mask <<= 1;
2160 }
2161 env->tlb_flush_addr &= mask;
2162 env->tlb_flush_mask = mask;
2163}
2164
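/* Illustrative sketch -- not part of the original exec.c. It restates the
 * widening loop of tlb_add_large_page() above on plain unsigned longs: the
 * tracked region grows to the smallest naturally aligned block that covers
 * both the previously recorded address and the new one. */
#if 0 /* example only */
static void example_extend_region(unsigned long *region_addr,
                                  unsigned long *region_mask,
                                  unsigned long new_addr,
                                  unsigned long new_mask)
{
    unsigned long mask = new_mask & *region_mask;

    while (((*region_addr ^ new_addr) & mask) != 0) {
        mask <<= 1;                 /* double the covered size until both fit */
    }
    *region_addr &= mask;
    *region_mask = mask;
}
#endif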
06ef3525 2165static bool is_ram_rom(MemoryRegionSection *s)
1d393fa2 2166{
06ef3525 2167 return memory_region_is_ram(s->mr);
1d393fa2
AK
2168}
2169
06ef3525 2170static bool is_romd(MemoryRegionSection *s)
75c578dc 2171{
06ef3525 2172 MemoryRegion *mr = s->mr;
75c578dc 2173
75c578dc
AK
2174 return mr->rom_device && mr->readable;
2175}
2176
06ef3525 2177static bool is_ram_rom_romd(MemoryRegionSection *s)
1d393fa2 2178{
06ef3525 2179 return is_ram_rom(s) || is_romd(s);
1d393fa2
AK
2180}
2181
d4c430a8
PB
2182/* Add a new TLB entry. At most one entry for a given virtual address
2183 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2184 supplied size is only used by tlb_flush_page. */
9349b4f9 2185void tlb_set_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2186 target_phys_addr_t paddr, int prot,
2187 int mmu_idx, target_ulong size)
9fa3e853 2188{
f3705d53 2189 MemoryRegionSection *section;
9fa3e853 2190 unsigned int index;
4f2ac237 2191 target_ulong address;
0f459d16 2192 target_ulong code_address;
355b1943 2193 unsigned long addend;
84b7b8e7 2194 CPUTLBEntry *te;
a1d1bb31 2195 CPUWatchpoint *wp;
c227f099 2196 target_phys_addr_t iotlb;
9fa3e853 2197
d4c430a8
PB
2198 assert(size >= TARGET_PAGE_SIZE);
2199 if (size != TARGET_PAGE_SIZE) {
2200 tlb_add_large_page(env, vaddr, size);
2201 }
06ef3525 2202 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853 2203#if defined(DEBUG_TLB)
7fd3f494
SW
2204 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2205 " prot=%x idx=%d pd=0x%08lx\n",
2206 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2207#endif
2208
0f459d16 2209 address = vaddr;
f3705d53 2210 if (!is_ram_rom_romd(section)) {
0f459d16
PB
2211 /* IO memory case (romd handled later) */
2212 address |= TLB_MMIO;
2213 }
f3705d53
AK
2214 if (is_ram_rom_romd(section)) {
2215 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2216 + section_addr(section, paddr);
06ef3525
AK
2217 } else {
2218 addend = 0;
2219 }
f3705d53 2220 if (is_ram_rom(section)) {
0f459d16 2221 /* Normal RAM. */
f3705d53
AK
2222 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2223 + section_addr(section, paddr);
2224 if (!section->readonly)
aa102231 2225 iotlb |= phys_section_notdirty;
0f459d16 2226 else
aa102231 2227 iotlb |= phys_section_rom;
0f459d16 2228 } else {
ccbb4d44 2229 /* IO handlers are currently passed a physical address.
0f459d16
PB
2230 It would be nice to pass an offset from the base address
2231 of that region. This would avoid having to special case RAM,
2232 and avoid full address decoding in every device.
2233 We can't use the high bits of pd for this because
2234 IO_MEM_ROMD uses these as a ram address. */
aa102231 2235 iotlb = section - phys_sections;
f3705d53 2236 iotlb += section_addr(section, paddr);
0f459d16
PB
2237 }
2238
2239 code_address = address;
2240 /* Make accesses to pages with watchpoints go via the
2241 watchpoint trap routines. */
72cf2d4f 2242 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2243 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2244 /* Avoid trapping reads of pages with a write breakpoint. */
2245 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
aa102231 2246 iotlb = phys_section_watch + paddr;
bf298f83
JK
2247 address |= TLB_MMIO;
2248 break;
2249 }
6658ffb8 2250 }
0f459d16 2251 }
d79acba4 2252
0f459d16
PB
2253 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2254 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2255 te = &env->tlb_table[mmu_idx][index];
2256 te->addend = addend - vaddr;
2257 if (prot & PAGE_READ) {
2258 te->addr_read = address;
2259 } else {
2260 te->addr_read = -1;
2261 }
5c751e99 2262
0f459d16
PB
2263 if (prot & PAGE_EXEC) {
2264 te->addr_code = code_address;
2265 } else {
2266 te->addr_code = -1;
2267 }
2268 if (prot & PAGE_WRITE) {
f3705d53
AK
2269 if ((memory_region_is_ram(section->mr) && section->readonly)
2270 || is_romd(section)) {
0f459d16
PB
2271 /* Write access calls the I/O callback. */
2272 te->addr_write = address | TLB_MMIO;
f3705d53 2273 } else if (memory_region_is_ram(section->mr)
06ef3525 2274 && !cpu_physical_memory_is_dirty(
f3705d53
AK
2275 section->mr->ram_addr
2276 + section_addr(section, paddr))) {
0f459d16 2277 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2278 } else {
0f459d16 2279 te->addr_write = address;
9fa3e853 2280 }
0f459d16
PB
2281 } else {
2282 te->addr_write = -1;
9fa3e853 2283 }
9fa3e853
FB
2284}
2285
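/* Illustrative sketch -- not part of the original exec.c. It shows how the
 * addend stored in the TLB entry above is meant to be used: te->addend holds
 * the host address of the guest page minus the guest virtual page address,
 * so any access inside the page translates with a single add on the fast
 * path. Types are simplified and the name is an example only. */
#if 0 /* example only */
static void *example_guest_to_host(unsigned long guest_vaddr, unsigned long addend)
{
    return (void *)(guest_vaddr + addend);   /* host pointer for this access */
}
#endif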
0124311e
FB
2286#else
2287
9349b4f9 2288void tlb_flush(CPUArchState *env, int flush_global)
0124311e
FB
2289{
2290}
2291
9349b4f9 2292void tlb_flush_page(CPUArchState *env, target_ulong addr)
0124311e
FB
2293{
2294}
2295
edf8e2af
MW
2296/*
2297 * Walks guest process memory "regions" one by one
2298 * and calls callback function 'fn' for each region.
2299 */
5cd2c5b6
RH
2300
2301struct walk_memory_regions_data
2302{
2303 walk_memory_regions_fn fn;
2304 void *priv;
2305 unsigned long start;
2306 int prot;
2307};
2308
2309static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2310 abi_ulong end, int new_prot)
5cd2c5b6
RH
2311{
2312 if (data->start != -1ul) {
2313 int rc = data->fn(data->priv, data->start, end, data->prot);
2314 if (rc != 0) {
2315 return rc;
2316 }
2317 }
2318
2319 data->start = (new_prot ? end : -1ul);
2320 data->prot = new_prot;
2321
2322 return 0;
2323}
2324
2325static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2326 abi_ulong base, int level, void **lp)
5cd2c5b6 2327{
b480d9b7 2328 abi_ulong pa;
5cd2c5b6
RH
2329 int i, rc;
2330
2331 if (*lp == NULL) {
2332 return walk_memory_regions_end(data, base, 0);
2333 }
2334
2335 if (level == 0) {
2336 PageDesc *pd = *lp;
7296abac 2337 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2338 int prot = pd[i].flags;
2339
2340 pa = base | (i << TARGET_PAGE_BITS);
2341 if (prot != data->prot) {
2342 rc = walk_memory_regions_end(data, pa, prot);
2343 if (rc != 0) {
2344 return rc;
9fa3e853 2345 }
9fa3e853 2346 }
5cd2c5b6
RH
2347 }
2348 } else {
2349 void **pp = *lp;
7296abac 2350 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2351 pa = base | ((abi_ulong)i <<
2352 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2353 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2354 if (rc != 0) {
2355 return rc;
2356 }
2357 }
2358 }
2359
2360 return 0;
2361}
2362
2363int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2364{
2365 struct walk_memory_regions_data data;
2366 unsigned long i;
2367
2368 data.fn = fn;
2369 data.priv = priv;
2370 data.start = -1ul;
2371 data.prot = 0;
2372
2373 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2374 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2375 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2376 if (rc != 0) {
2377 return rc;
9fa3e853 2378 }
33417e70 2379 }
5cd2c5b6
RH
2380
2381 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2382}
2383
b480d9b7
PB
2384static int dump_region(void *priv, abi_ulong start,
2385 abi_ulong end, unsigned long prot)
edf8e2af
MW
2386{
2387 FILE *f = (FILE *)priv;
2388
b480d9b7
PB
2389 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2390 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2391 start, end, end - start,
2392 ((prot & PAGE_READ) ? 'r' : '-'),
2393 ((prot & PAGE_WRITE) ? 'w' : '-'),
2394 ((prot & PAGE_EXEC) ? 'x' : '-'));
2395
2396 return (0);
2397}
2398
2399/* dump memory mappings */
2400void page_dump(FILE *f)
2401{
2402 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2403 "start", "end", "size", "prot");
2404 walk_memory_regions(f, dump_region);
33417e70
FB
2405}
2406
53a5960a 2407int page_get_flags(target_ulong address)
33417e70 2408{
9fa3e853
FB
2409 PageDesc *p;
2410
2411 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2412 if (!p)
9fa3e853
FB
2413 return 0;
2414 return p->flags;
2415}
2416
376a7909
RH
2417/* Modify the flags of a page and invalidate the code if necessary.
2418 The flag PAGE_WRITE_ORG is positioned automatically depending
2419 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2420void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2421{
376a7909
RH
2422 target_ulong addr, len;
2423
2424 /* This function should never be called with addresses outside the
2425 guest address space. If this assert fires, it probably indicates
2426 a missing call to h2g_valid. */
b480d9b7
PB
2427#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2428 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2429#endif
2430 assert(start < end);
9fa3e853
FB
2431
2432 start = start & TARGET_PAGE_MASK;
2433 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2434
2435 if (flags & PAGE_WRITE) {
9fa3e853 2436 flags |= PAGE_WRITE_ORG;
376a7909
RH
2437 }
2438
2439 for (addr = start, len = end - start;
2440 len != 0;
2441 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2442 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2443
2444 /* If the write protection bit is set, then we invalidate
2445 the code inside. */
5fafdf24 2446 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2447 (flags & PAGE_WRITE) &&
2448 p->first_tb) {
d720b93d 2449 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2450 }
2451 p->flags = flags;
2452 }
33417e70
FB
2453}
2454
3d97b40b
TS
2455int page_check_range(target_ulong start, target_ulong len, int flags)
2456{
2457 PageDesc *p;
2458 target_ulong end;
2459 target_ulong addr;
2460
376a7909
RH
2461 /* This function should never be called with addresses outside the
2462 guest address space. If this assert fires, it probably indicates
2463 a missing call to h2g_valid. */
338e9e6c
BS
2464#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2465 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2466#endif
2467
3e0650a9
RH
2468 if (len == 0) {
2469 return 0;
2470 }
376a7909
RH
2471 if (start + len - 1 < start) {
2472 /* We've wrapped around. */
55f280c9 2473 return -1;
376a7909 2474 }
55f280c9 2475
3d97b40b
TS
2476 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2477 start = start & TARGET_PAGE_MASK;
2478
376a7909
RH
2479 for (addr = start, len = end - start;
2480 len != 0;
2481 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2482 p = page_find(addr >> TARGET_PAGE_BITS);
2483 if( !p )
2484 return -1;
2485 if( !(p->flags & PAGE_VALID) )
2486 return -1;
2487
dae3270c 2488 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2489 return -1;
dae3270c
FB
2490 if (flags & PAGE_WRITE) {
2491 if (!(p->flags & PAGE_WRITE_ORG))
2492 return -1;
2493 /* unprotect the page if it was put read-only because it
2494 contains translated code */
2495 if (!(p->flags & PAGE_WRITE)) {
2496 if (!page_unprotect(addr, 0, NULL))
2497 return -1;
2498 }
2499 return 0;
2500 }
3d97b40b
TS
2501 }
2502 return 0;
2503}
2504
9fa3e853 2505/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2506 page. Return TRUE if the fault was successfully handled. */
6375e09e 2507int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2508{
45d679d6
AJ
2509 unsigned int prot;
2510 PageDesc *p;
53a5960a 2511 target_ulong host_start, host_end, addr;
9fa3e853 2512
c8a706fe
PB
2513 /* Technically this isn't safe inside a signal handler. However we
2514 know this only ever happens in a synchronous SEGV handler, so in
2515 practice it seems to be ok. */
2516 mmap_lock();
2517
45d679d6
AJ
2518 p = page_find(address >> TARGET_PAGE_BITS);
2519 if (!p) {
c8a706fe 2520 mmap_unlock();
9fa3e853 2521 return 0;
c8a706fe 2522 }
45d679d6 2523
9fa3e853
FB
2524 /* if the page was really writable, then we change its
2525 protection back to writable */
45d679d6
AJ
2526 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2527 host_start = address & qemu_host_page_mask;
2528 host_end = host_start + qemu_host_page_size;
2529
2530 prot = 0;
2531 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2532 p = page_find(addr >> TARGET_PAGE_BITS);
2533 p->flags |= PAGE_WRITE;
2534 prot |= p->flags;
2535
9fa3e853
FB
2536 /* and since the content will be modified, we must invalidate
2537 the corresponding translated code. */
45d679d6 2538 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2539#ifdef DEBUG_TB_CHECK
45d679d6 2540 tb_invalidate_check(addr);
9fa3e853 2541#endif
9fa3e853 2542 }
45d679d6
AJ
2543 mprotect((void *)g2h(host_start), qemu_host_page_size,
2544 prot & PAGE_BITS);
2545
2546 mmap_unlock();
2547 return 1;
9fa3e853 2548 }
c8a706fe 2549 mmap_unlock();
9fa3e853
FB
2550 return 0;
2551}
2552
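/* Illustrative sketch -- not part of the original exec.c. It shows why
 * page_unprotect() above walks a whole host page: mprotect() works at host
 * page granularity, which may be larger than the target page, so one call
 * re-enables writes for every target page inside that host page and each of
 * them must have its translated code invalidated first. The 64K host page
 * and 4K target page values are examples. */
#if 0 /* example only */
static void example_host_page_bounds(unsigned long address,
                                     unsigned long *start, unsigned long *end)
{
    const unsigned long host_page_size = 0x10000;   /* example: 64K host page */

    *start = address & ~(host_page_size - 1);
    *end   = *start + host_page_size;               /* covers 16 x 4K target pages */
}
#endif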
9349b4f9 2553static inline void tlb_set_dirty(CPUArchState *env,
6a00d601 2554 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2555{
2556}
9fa3e853
FB
2557#endif /* defined(CONFIG_USER_ONLY) */
2558
e2eef170 2559#if !defined(CONFIG_USER_ONLY)
8da3ff18 2560
c04b2b78
PB
2561#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2562typedef struct subpage_t {
70c68e44 2563 MemoryRegion iomem;
c04b2b78 2564 target_phys_addr_t base;
5312bd8b 2565 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2566} subpage_t;
2567
c227f099 2568static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2569 uint16_t section);
0f0cb164 2570static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2571static void destroy_page_desc(uint16_t section_index)
54688b1e 2572{
5312bd8b
AK
2573 MemoryRegionSection *section = &phys_sections[section_index];
2574 MemoryRegion *mr = section->mr;
54688b1e
AK
2575
2576 if (mr->subpage) {
2577 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2578 memory_region_destroy(&subpage->iomem);
2579 g_free(subpage);
2580 }
2581}
2582
4346ae3e 2583static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2584{
2585 unsigned i;
d6f2ea22 2586 PhysPageEntry *p;
54688b1e 2587
c19e8800 2588 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2589 return;
2590 }
2591
c19e8800 2592 p = phys_map_nodes[lp->ptr];
4346ae3e 2593 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2594 if (!p[i].is_leaf) {
54688b1e 2595 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2596 } else {
c19e8800 2597 destroy_page_desc(p[i].ptr);
54688b1e 2598 }
54688b1e 2599 }
07f07b31 2600 lp->is_leaf = 0;
c19e8800 2601 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2602}
2603
2604static void destroy_all_mappings(void)
2605{
3eef53df 2606 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2607 phys_map_nodes_reset();
54688b1e
AK
2608}
2609
5312bd8b
AK
2610static uint16_t phys_section_add(MemoryRegionSection *section)
2611{
2612 if (phys_sections_nb == phys_sections_nb_alloc) {
2613 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2614 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2615 phys_sections_nb_alloc);
2616 }
2617 phys_sections[phys_sections_nb] = *section;
2618 return phys_sections_nb++;
2619}
2620
2621static void phys_sections_clear(void)
2622{
2623 phys_sections_nb = 0;
2624}
2625
8f2498f9
MT
2626/* register physical memory.
2627 For RAM, 'size' must be a multiple of the target page size.
2628 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2629 io memory page. The address used when calling the IO function is
2630 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2631 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2632 before calculating this offset. This should not be a problem unless
2633 the low bits of start_addr and region_offset differ. */
0f0cb164
AK
2634static void register_subpage(MemoryRegionSection *section)
2635{
2636 subpage_t *subpage;
2637 target_phys_addr_t base = section->offset_within_address_space
2638 & TARGET_PAGE_MASK;
f3705d53 2639 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2640 MemoryRegionSection subsection = {
2641 .offset_within_address_space = base,
2642 .size = TARGET_PAGE_SIZE,
2643 };
0f0cb164
AK
2644 target_phys_addr_t start, end;
2645
f3705d53 2646 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2647
f3705d53 2648 if (!(existing->mr->subpage)) {
0f0cb164
AK
2649 subpage = subpage_init(base);
2650 subsection.mr = &subpage->iomem;
2999097b
AK
2651 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2652 phys_section_add(&subsection));
0f0cb164 2653 } else {
f3705d53 2654 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2655 }
2656 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2657 end = start + section->size;
2658 subpage_register(subpage, start, end, phys_section_add(section));
2659}
2660
2661
2662static void register_multipage(MemoryRegionSection *section)
33417e70 2663{
dd81124b
AK
2664 target_phys_addr_t start_addr = section->offset_within_address_space;
2665 ram_addr_t size = section->size;
2999097b 2666 target_phys_addr_t addr;
5312bd8b 2667 uint16_t section_index = phys_section_add(section);
dd81124b 2668
3b8e6a2d 2669 assert(size);
f6f3fbca 2670
3b8e6a2d 2671 addr = start_addr;
2999097b
AK
2672 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2673 section_index);
33417e70
FB
2674}
2675
0f0cb164
AK
2676void cpu_register_physical_memory_log(MemoryRegionSection *section,
2677 bool readonly)
2678{
2679 MemoryRegionSection now = *section, remain = *section;
2680
2681 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2682 || (now.size < TARGET_PAGE_SIZE)) {
2683 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2684 - now.offset_within_address_space,
2685 now.size);
2686 register_subpage(&now);
2687 remain.size -= now.size;
2688 remain.offset_within_address_space += now.size;
2689 remain.offset_within_region += now.size;
2690 }
2691 now = remain;
2692 now.size &= TARGET_PAGE_MASK;
2693 if (now.size) {
2694 register_multipage(&now);
2695 remain.size -= now.size;
2696 remain.offset_within_address_space += now.size;
2697 remain.offset_within_region += now.size;
2698 }
2699 now = remain;
2700 if (now.size) {
2701 register_subpage(&now);
2702 }
2703}
2704
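/* Illustrative sketch -- not part of the original exec.c. It mirrors the
 * splitting done by cpu_register_physical_memory_log() above in the common
 * case: an unaligned head goes to a subpage, the page-aligned middle is
 * registered as full pages, and the unaligned tail goes to a subpage again.
 * The 4K page size and the sample numbers are examples only. */
#if 0 /* example only */
static void example_split(unsigned long start, unsigned long size)
{
    const unsigned long page = 0x1000;           /* example page size: 4K */
    unsigned long head, middle, tail;

    head = (-start) & (page - 1);                /* bytes up to the next page boundary */
    if (head > size) {
        head = size;
    }
    middle = (size - head) & ~(page - 1);        /* whole aligned pages */
    tail = size - head - middle;                 /* unaligned leftover */
    /* e.g. start=0x1f00, size=0x3200 -> head=0x100, middle=0x3000, tail=0x100 */
    printf("head=%#lx middle=%#lx tail=%#lx\n", head, middle, tail);
}
#endif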
2705
c227f099 2706void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2707{
2708 if (kvm_enabled())
2709 kvm_coalesce_mmio_region(addr, size);
2710}
2711
c227f099 2712void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2713{
2714 if (kvm_enabled())
2715 kvm_uncoalesce_mmio_region(addr, size);
2716}
2717
62a2744c
SY
2718void qemu_flush_coalesced_mmio_buffer(void)
2719{
2720 if (kvm_enabled())
2721 kvm_flush_coalesced_mmio_buffer();
2722}
2723
c902760f
MT
2724#if defined(__linux__) && !defined(TARGET_S390X)
2725
2726#include <sys/vfs.h>
2727
2728#define HUGETLBFS_MAGIC 0x958458f6
2729
2730static long gethugepagesize(const char *path)
2731{
2732 struct statfs fs;
2733 int ret;
2734
2735 do {
9742bf26 2736 ret = statfs(path, &fs);
c902760f
MT
2737 } while (ret != 0 && errno == EINTR);
2738
2739 if (ret != 0) {
9742bf26
YT
2740 perror(path);
2741 return 0;
c902760f
MT
2742 }
2743
2744 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2745 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2746
2747 return fs.f_bsize;
2748}
2749
04b16653
AW
2750static void *file_ram_alloc(RAMBlock *block,
2751 ram_addr_t memory,
2752 const char *path)
c902760f
MT
2753{
2754 char *filename;
2755 void *area;
2756 int fd;
2757#ifdef MAP_POPULATE
2758 int flags;
2759#endif
2760 unsigned long hpagesize;
2761
2762 hpagesize = gethugepagesize(path);
2763 if (!hpagesize) {
9742bf26 2764 return NULL;
c902760f
MT
2765 }
2766
2767 if (memory < hpagesize) {
2768 return NULL;
2769 }
2770
2771 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2772 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2773 return NULL;
2774 }
2775
2776 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2777 return NULL;
c902760f
MT
2778 }
2779
2780 fd = mkstemp(filename);
2781 if (fd < 0) {
9742bf26
YT
2782 perror("unable to create backing store for hugepages");
2783 free(filename);
2784 return NULL;
c902760f
MT
2785 }
2786 unlink(filename);
2787 free(filename);
2788
2789 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2790
2791 /*
2792 * ftruncate is not supported by hugetlbfs in older
2793 * hosts, so don't bother bailing out on errors.
2794 * If anything goes wrong with it under other filesystems,
2795 * mmap will fail.
2796 */
2797 if (ftruncate(fd, memory))
9742bf26 2798 perror("ftruncate");
c902760f
MT
2799
2800#ifdef MAP_POPULATE
2801 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2802 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2803 * to sidestep this quirk.
2804 */
2805 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2806 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2807#else
2808 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2809#endif
2810 if (area == MAP_FAILED) {
9742bf26
YT
2811 perror("file_ram_alloc: can't mmap RAM pages");
2812 close(fd);
2813 return (NULL);
c902760f 2814 }
04b16653 2815 block->fd = fd;
c902760f
MT
2816 return area;
2817}
2818#endif
2819
d17b5288 2820static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2821{
2822 RAMBlock *block, *next_block;
3e837b2c 2823 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2824
2825 if (QLIST_EMPTY(&ram_list.blocks))
2826 return 0;
2827
2828 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2829 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2830
2831 end = block->offset + block->length;
2832
2833 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2834 if (next_block->offset >= end) {
2835 next = MIN(next, next_block->offset);
2836 }
2837 }
2838 if (next - end >= size && next - end < mingap) {
3e837b2c 2839 offset = end;
04b16653
AW
2840 mingap = next - end;
2841 }
2842 }
3e837b2c
AW
2843
2844 if (offset == RAM_ADDR_MAX) {
2845 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2846 (uint64_t)size);
2847 abort();
2848 }
2849
04b16653
AW
2850 return offset;
2851}
2852
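/* Illustrative sketch -- not part of the original exec.c. It restates the
 * best-fit search of find_ram_offset() above on a plain array instead of the
 * block list: for every block, find the closest block starting above its end,
 * and take the smallest gap that still fits the request. Names and types are
 * examples only. */
#if 0 /* example only */
struct example_block { unsigned long start, len; };

static unsigned long example_find_offset(const struct example_block *b, int n,
                                         unsigned long size)
{
    unsigned long best = (unsigned long)-1;
    unsigned long best_gap = (unsigned long)-1;
    int i, j;

    for (i = 0; i < n; i++) {
        unsigned long end = b[i].start + b[i].len;
        unsigned long next = (unsigned long)-1;

        for (j = 0; j < n; j++) {            /* closest block starting at or above 'end' */
            if (b[j].start >= end && b[j].start < next) {
                next = b[j].start;
            }
        }
        if (next - end >= size && next - end < best_gap) {
            best = end;                      /* smallest gap that still fits wins */
            best_gap = next - end;
        }
    }
    return best;   /* (unsigned long)-1 if nothing fits; the real code aborts then */
}
#endif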
2853static ram_addr_t last_ram_offset(void)
d17b5288
AW
2854{
2855 RAMBlock *block;
2856 ram_addr_t last = 0;
2857
2858 QLIST_FOREACH(block, &ram_list.blocks, next)
2859 last = MAX(last, block->offset + block->length);
2860
2861 return last;
2862}
2863
c5705a77 2864void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2865{
2866 RAMBlock *new_block, *block;
2867
c5705a77
AK
2868 new_block = NULL;
2869 QLIST_FOREACH(block, &ram_list.blocks, next) {
2870 if (block->offset == addr) {
2871 new_block = block;
2872 break;
2873 }
2874 }
2875 assert(new_block);
2876 assert(!new_block->idstr[0]);
84b89d78
CM
2877
2878 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2879 char *id = dev->parent_bus->info->get_dev_path(dev);
2880 if (id) {
2881 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2882 g_free(id);
84b89d78
CM
2883 }
2884 }
2885 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2886
2887 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2888 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2889 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2890 new_block->idstr);
2891 abort();
2892 }
2893 }
c5705a77
AK
2894}
2895
2896ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2897 MemoryRegion *mr)
2898{
2899 RAMBlock *new_block;
2900
2901 size = TARGET_PAGE_ALIGN(size);
2902 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2903
7c637366 2904 new_block->mr = mr;
432d268c 2905 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2906 if (host) {
2907 new_block->host = host;
cd19cfa2 2908 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2909 } else {
2910 if (mem_path) {
c902760f 2911#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2912 new_block->host = file_ram_alloc(new_block, size, mem_path);
2913 if (!new_block->host) {
2914 new_block->host = qemu_vmalloc(size);
e78815a5 2915 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2916 }
c902760f 2917#else
6977dfe6
YT
2918 fprintf(stderr, "-mem-path option unsupported\n");
2919 exit(1);
c902760f 2920#endif
6977dfe6 2921 } else {
6b02494d 2922#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2923 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2924 a system-defined value, which is at least 256GB. Larger systems
2925 have larger values. We put the guest between the end of data
2926 segment (system break) and this value. We use 32GB as a base to
2927 have enough room for the system break to grow. */
2928 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2929 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2930 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2931 if (new_block->host == MAP_FAILED) {
2932 fprintf(stderr, "Allocating RAM failed\n");
2933 abort();
2934 }
6b02494d 2935#else
868bb33f 2936 if (xen_enabled()) {
fce537d4 2937 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2938 } else {
2939 new_block->host = qemu_vmalloc(size);
2940 }
6b02494d 2941#endif
e78815a5 2942 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2943 }
c902760f 2944 }
94a6b54f
PB
2945 new_block->length = size;
2946
f471a17e 2947 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2948
7267c094 2949 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2950 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2951 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2952 0xff, size >> TARGET_PAGE_BITS);
2953
6f0437e8
JK
2954 if (kvm_enabled())
2955 kvm_setup_guest_memory(new_block->host, size);
2956
94a6b54f
PB
2957 return new_block->offset;
2958}
e9a1ab19 2959
c5705a77 2960ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2961{
c5705a77 2962 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2963}
2964
1f2e98b6
AW
2965void qemu_ram_free_from_ptr(ram_addr_t addr)
2966{
2967 RAMBlock *block;
2968
2969 QLIST_FOREACH(block, &ram_list.blocks, next) {
2970 if (addr == block->offset) {
2971 QLIST_REMOVE(block, next);
7267c094 2972 g_free(block);
1f2e98b6
AW
2973 return;
2974 }
2975 }
2976}
2977
c227f099 2978void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2979{
04b16653
AW
2980 RAMBlock *block;
2981
2982 QLIST_FOREACH(block, &ram_list.blocks, next) {
2983 if (addr == block->offset) {
2984 QLIST_REMOVE(block, next);
cd19cfa2
HY
2985 if (block->flags & RAM_PREALLOC_MASK) {
2986 ;
2987 } else if (mem_path) {
04b16653
AW
2988#if defined (__linux__) && !defined(TARGET_S390X)
2989 if (block->fd) {
2990 munmap(block->host, block->length);
2991 close(block->fd);
2992 } else {
2993 qemu_vfree(block->host);
2994 }
fd28aa13
JK
2995#else
2996 abort();
04b16653
AW
2997#endif
2998 } else {
2999#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3000 munmap(block->host, block->length);
3001#else
868bb33f 3002 if (xen_enabled()) {
e41d7c69 3003 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3004 } else {
3005 qemu_vfree(block->host);
3006 }
04b16653
AW
3007#endif
3008 }
7267c094 3009 g_free(block);
04b16653
AW
3010 return;
3011 }
3012 }
3013
e9a1ab19
FB
3014}
3015
cd19cfa2
HY
3016#ifndef _WIN32
3017void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3018{
3019 RAMBlock *block;
3020 ram_addr_t offset;
3021 int flags;
3022 void *area, *vaddr;
3023
3024 QLIST_FOREACH(block, &ram_list.blocks, next) {
3025 offset = addr - block->offset;
3026 if (offset < block->length) {
3027 vaddr = block->host + offset;
3028 if (block->flags & RAM_PREALLOC_MASK) {
3029 ;
3030 } else {
3031 flags = MAP_FIXED;
3032 munmap(vaddr, length);
3033 if (mem_path) {
3034#if defined(__linux__) && !defined(TARGET_S390X)
3035 if (block->fd) {
3036#ifdef MAP_POPULATE
3037 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3038 MAP_PRIVATE;
3039#else
3040 flags |= MAP_PRIVATE;
3041#endif
3042 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3043 flags, block->fd, offset);
3044 } else {
3045 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3046 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3047 flags, -1, 0);
3048 }
fd28aa13
JK
3049#else
3050 abort();
cd19cfa2
HY
3051#endif
3052 } else {
3053#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3054 flags |= MAP_SHARED | MAP_ANONYMOUS;
3055 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3056 flags, -1, 0);
3057#else
3058 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3059 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3060 flags, -1, 0);
3061#endif
3062 }
3063 if (area != vaddr) {
f15fbc4b
AP
3064 fprintf(stderr, "Could not remap addr: "
3065 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3066 length, addr);
3067 exit(1);
3068 }
3069 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3070 }
3071 return;
3072 }
3073 }
3074}
3075#endif /* !_WIN32 */
3076
dc828ca1 3077/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3078 With the exception of the softmmu code in this file, this should
3079 only be used for local memory (e.g. video ram) that the device owns,
3080 and knows it isn't going to access beyond the end of the block.
3081
3082 It should not be used for general purpose DMA.
3083 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3084 */
c227f099 3085void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3086{
94a6b54f
PB
3087 RAMBlock *block;
3088
f471a17e
AW
3089 QLIST_FOREACH(block, &ram_list.blocks, next) {
3090 if (addr - block->offset < block->length) {
7d82af38
VP
3091 /* Move this entry to the start of the list. */
3092 if (block != QLIST_FIRST(&ram_list.blocks)) {
3093 QLIST_REMOVE(block, next);
3094 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3095 }
868bb33f 3096 if (xen_enabled()) {
432d268c
JN
3097 /* We need to check if the requested address is in the RAM
3098 * because we don't want to map the entire memory in QEMU.
712c2b41 3099 * In that case just map until the end of the page.
432d268c
JN
3100 */
3101 if (block->offset == 0) {
e41d7c69 3102 return xen_map_cache(addr, 0, 0);
432d268c 3103 } else if (block->host == NULL) {
e41d7c69
JK
3104 block->host =
3105 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3106 }
3107 }
f471a17e
AW
3108 return block->host + (addr - block->offset);
3109 }
94a6b54f 3110 }
f471a17e
AW
3111
3112 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3113 abort();
3114
3115 return NULL;
dc828ca1
PB
3116}
3117
b2e0a138
MT
3118/* Return a host pointer to ram allocated with qemu_ram_alloc.
3119 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3120 */
3121void *qemu_safe_ram_ptr(ram_addr_t addr)
3122{
3123 RAMBlock *block;
3124
3125 QLIST_FOREACH(block, &ram_list.blocks, next) {
3126 if (addr - block->offset < block->length) {
868bb33f 3127 if (xen_enabled()) {
432d268c
JN
3128 /* We need to check if the requested address is in the RAM
3129 * because we don't want to map the entire memory in QEMU.
712c2b41 3130 * In that case just map until the end of the page.
432d268c
JN
3131 */
3132 if (block->offset == 0) {
e41d7c69 3133 return xen_map_cache(addr, 0, 0);
432d268c 3134 } else if (block->host == NULL) {
e41d7c69
JK
3135 block->host =
3136 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3137 }
3138 }
b2e0a138
MT
3139 return block->host + (addr - block->offset);
3140 }
3141 }
3142
3143 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3144 abort();
3145
3146 return NULL;
3147}
3148
38bee5dc
SS
3149/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3150 * but takes a size argument */
8ab934f9 3151void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3152{
8ab934f9
SS
3153 if (*size == 0) {
3154 return NULL;
3155 }
868bb33f 3156 if (xen_enabled()) {
e41d7c69 3157 return xen_map_cache(addr, *size, 1);
868bb33f 3158 } else {
38bee5dc
SS
3159 RAMBlock *block;
3160
3161 QLIST_FOREACH(block, &ram_list.blocks, next) {
3162 if (addr - block->offset < block->length) {
3163 if (addr - block->offset + *size > block->length)
3164 *size = block->length - addr + block->offset;
3165 return block->host + (addr - block->offset);
3166 }
3167 }
3168
3169 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3170 abort();
38bee5dc
SS
3171 }
3172}
3173
050a0ddf
AP
3174void qemu_put_ram_ptr(void *addr)
3175{
3176 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3177}
3178
e890261f 3179int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3180{
94a6b54f
PB
3181 RAMBlock *block;
3182 uint8_t *host = ptr;
3183
868bb33f 3184 if (xen_enabled()) {
e41d7c69 3185 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3186 return 0;
3187 }
3188
f471a17e 3189 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3190 /* This case happens when the block is not mapped. */
3191 if (block->host == NULL) {
3192 continue;
3193 }
f471a17e 3194 if (host - block->host < block->length) {
e890261f
MT
3195 *ram_addr = block->offset + (host - block->host);
3196 return 0;
f471a17e 3197 }
94a6b54f 3198 }
432d268c 3199
e890261f
MT
3200 return -1;
3201}
f471a17e 3202
e890261f
MT
3203/* Some of the softmmu routines need to translate from a host pointer
3204 (typically a TLB entry) back to a ram offset. */
3205ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3206{
3207 ram_addr_t ram_addr;
f471a17e 3208
e890261f
MT
3209 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3210 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3211 abort();
3212 }
3213 return ram_addr;
5579c7f3
PB
3214}
3215
0e0df1e2
AK
3216static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3217 unsigned size)
e18231a3
BS
3218{
3219#ifdef DEBUG_UNASSIGNED
3220 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3221#endif
5b450407 3222#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3223 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3224#endif
3225 return 0;
3226}
3227
0e0df1e2
AK
3228static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3229 uint64_t val, unsigned size)
e18231a3
BS
3230{
3231#ifdef DEBUG_UNASSIGNED
0e0df1e2 3232 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3233#endif
5b450407 3234#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3235 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3236#endif
33417e70
FB
3237}
3238
0e0df1e2
AK
3239static const MemoryRegionOps unassigned_mem_ops = {
3240 .read = unassigned_mem_read,
3241 .write = unassigned_mem_write,
3242 .endianness = DEVICE_NATIVE_ENDIAN,
3243};
e18231a3 3244
0e0df1e2
AK
3245static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3246 unsigned size)
e18231a3 3247{
0e0df1e2 3248 abort();
e18231a3
BS
3249}
3250
0e0df1e2
AK
3251static void error_mem_write(void *opaque, target_phys_addr_t addr,
3252 uint64_t value, unsigned size)
e18231a3 3253{
0e0df1e2 3254 abort();
33417e70
FB
3255}
3256
0e0df1e2
AK
3257static const MemoryRegionOps error_mem_ops = {
3258 .read = error_mem_read,
3259 .write = error_mem_write,
3260 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3261};
3262
0e0df1e2
AK
3263static const MemoryRegionOps rom_mem_ops = {
3264 .read = error_mem_read,
3265 .write = unassigned_mem_write,
3266 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3267};
3268
0e0df1e2
AK
3269static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3270 uint64_t val, unsigned size)
9fa3e853 3271{
3a7d929e 3272 int dirty_flags;
f7c11b53 3273 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3274 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3275#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3276 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3277 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3278#endif
3a7d929e 3279 }
0e0df1e2
AK
3280 switch (size) {
3281 case 1:
3282 stb_p(qemu_get_ram_ptr(ram_addr), val);
3283 break;
3284 case 2:
3285 stw_p(qemu_get_ram_ptr(ram_addr), val);
3286 break;
3287 case 4:
3288 stl_p(qemu_get_ram_ptr(ram_addr), val);
3289 break;
3290 default:
3291 abort();
3a7d929e 3292 }
f23db169 3293 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3294 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3295 /* we remove the notdirty callback only if the code has been
3296 flushed */
3297 if (dirty_flags == 0xff)
2e70f6ef 3298 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3299}
3300
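/* Illustrative sketch -- not part of the original exec.c. It condenses the
 * dirty bookkeeping done in notdirty_mem_write() above: each RAM page has one
 * byte of dirty flags, a guest store sets every flag except CODE_DIRTY_FLAG
 * (which is only set once any translated code for the page has been
 * invalidated), and only when the byte reaches 0xff can the slow write path
 * be dropped for that page. The flag value used here is an example. */
#if 0 /* example only */
static int example_mark_written(unsigned char *flags)
{
    const unsigned char code_dirty = 0x02;   /* example CODE_DIRTY_FLAG value */

    *flags |= 0xff & ~code_dirty;
    return *flags == 0xff;                   /* true: stop trapping writes */
}
#endif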
0e0df1e2
AK
3301static const MemoryRegionOps notdirty_mem_ops = {
3302 .read = error_mem_read,
3303 .write = notdirty_mem_write,
3304 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3305};
3306
0f459d16 3307/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3308static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3309{
9349b4f9 3310 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3311 target_ulong pc, cs_base;
3312 TranslationBlock *tb;
0f459d16 3313 target_ulong vaddr;
a1d1bb31 3314 CPUWatchpoint *wp;
06d55cc1 3315 int cpu_flags;
0f459d16 3316
06d55cc1
AL
3317 if (env->watchpoint_hit) {
3318 /* We re-entered the check after replacing the TB. Now raise
3319 * the debug interrupt so that it will trigger after the
3320 * current instruction. */
3321 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3322 return;
3323 }
2e70f6ef 3324 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3325 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3326 if ((vaddr == (wp->vaddr & len_mask) ||
3327 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3328 wp->flags |= BP_WATCHPOINT_HIT;
3329 if (!env->watchpoint_hit) {
3330 env->watchpoint_hit = wp;
3331 tb = tb_find_pc(env->mem_io_pc);
3332 if (!tb) {
3333 cpu_abort(env, "check_watchpoint: could not find TB for "
3334 "pc=%p", (void *)env->mem_io_pc);
3335 }
618ba8e6 3336 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3337 tb_phys_invalidate(tb, -1);
3338 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3339 env->exception_index = EXCP_DEBUG;
488d6577 3340 cpu_loop_exit(env);
6e140f28
AL
3341 } else {
3342 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3343 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3344 cpu_resume_from_signal(env, NULL);
6e140f28 3345 }
06d55cc1 3346 }
6e140f28
AL
3347 } else {
3348 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3349 }
3350 }
3351}
3352
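/* Illustrative sketch -- not part of the original exec.c. It isolates the
 * overlap test used in check_watchpoint() above: both the access and the
 * watchpoint cover a naturally aligned power-of-two range described by
 * (addr, mask) with mask = ~(length - 1), and two such ranges overlap exactly
 * when either base, masked to the other range's granularity, equals the other
 * base. Names are examples only. */
#if 0 /* example only */
static int example_ranges_overlap(unsigned long access_addr, unsigned long access_mask,
                                  unsigned long watch_addr, unsigned long watch_mask)
{
    return (watch_addr & access_mask) == access_addr ||
           (access_addr & watch_mask) == watch_addr;
}
#endif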
6658ffb8
PB
3353/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3354 so these check for a hit then pass through to the normal out-of-line
3355 phys routines. */
1ec9b909
AK
3356static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3357 unsigned size)
6658ffb8 3358{
1ec9b909
AK
3359 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3360 switch (size) {
3361 case 1: return ldub_phys(addr);
3362 case 2: return lduw_phys(addr);
3363 case 4: return ldl_phys(addr);
3364 default: abort();
3365 }
6658ffb8
PB
3366}
3367
1ec9b909
AK
3368static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3369 uint64_t val, unsigned size)
6658ffb8 3370{
1ec9b909
AK
3371 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3372 switch (size) {
67364150
MF
3373 case 1:
3374 stb_phys(addr, val);
3375 break;
3376 case 2:
3377 stw_phys(addr, val);
3378 break;
3379 case 4:
3380 stl_phys(addr, val);
3381 break;
1ec9b909
AK
3382 default: abort();
3383 }
6658ffb8
PB
3384}
3385
1ec9b909
AK
3386static const MemoryRegionOps watch_mem_ops = {
3387 .read = watch_mem_read,
3388 .write = watch_mem_write,
3389 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3390};
6658ffb8 3391
70c68e44
AK
3392static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3393 unsigned len)
db7b5426 3394{
70c68e44 3395 subpage_t *mmio = opaque;
f6405247 3396 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3397 MemoryRegionSection *section;
db7b5426
BS
3398#if defined(DEBUG_SUBPAGE)
3399 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3400 mmio, len, addr, idx);
3401#endif
db7b5426 3402
5312bd8b
AK
3403 section = &phys_sections[mmio->sub_section[idx]];
3404 addr += mmio->base;
3405 addr -= section->offset_within_address_space;
3406 addr += section->offset_within_region;
37ec01d4 3407 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3408}
3409
70c68e44
AK
3410static void subpage_write(void *opaque, target_phys_addr_t addr,
3411 uint64_t value, unsigned len)
db7b5426 3412{
70c68e44 3413 subpage_t *mmio = opaque;
f6405247 3414 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3415 MemoryRegionSection *section;
db7b5426 3416#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3417 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3418 " idx %d value %"PRIx64"\n",
f6405247 3419 __func__, mmio, len, addr, idx, value);
db7b5426 3420#endif
f6405247 3421
5312bd8b
AK
3422 section = &phys_sections[mmio->sub_section[idx]];
3423 addr += mmio->base;
3424 addr -= section->offset_within_address_space;
3425 addr += section->offset_within_region;
37ec01d4 3426 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3427}
3428
70c68e44
AK
3429static const MemoryRegionOps subpage_ops = {
3430 .read = subpage_read,
3431 .write = subpage_write,
3432 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3433};
3434
static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

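/* Illustrative sketch (not part of the original file): how callers such as
 * subpage_init() below use subpage_register().  The start/end arguments are
 * byte offsets within a single target page and the section argument is an
 * index into phys_sections[].  The range chosen here is made up.
 */
#if 0
static void example_register_first_half_page(subpage_t *mmio, uint16_t section)
{
    /* route the first half of the page to 'section', leave the rest alone */
    subpage_register(mmio, 0, TARGET_PAGE_SIZE / 2 - 1, section);
}
#endif
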
static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

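/* Illustrative sketch (not part of the original file): the TLB code packs a
 * phys_sections[] index into the sub-page bits (below TARGET_PAGE_MASK) of
 * each iotlb entry, and iotlb_to_region() recovers the MemoryRegion from it.
 * The helper below restates the lookup that get_page_addr_code() performs
 * later in this file; its name is hypothetical.
 */
#if 0
static MemoryRegion *example_iotlb_lookup(CPUArchState *env, int mmu_idx,
                                          int page_index)
{
    /* keep only the section index stored in the low, sub-page bits */
    int pd = env->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;

    return iotlb_to_region(pd);
}
#endif
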
static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, int fd)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, int fd)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

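/* Illustrative sketch (not part of the original file): how board code is
 * expected to plug an MMIO region into the address space built by
 * memory_map_init().  The ops structure, size and offset are invented, and
 * memory_region_add_subregion() is assumed to have the usual
 * (container, offset, subregion) signature of this tree's memory API.
 */
#if 0
static MemoryRegion example_mmio;

static void example_map_device(const MemoryRegionOps *example_ops, void *opaque)
{
    memory_region_init_io(&example_mmio, example_ops, opaque,
                          "example-mmio", 0x1000);
    /* make it visible to the CPUs at physical address 0x10000000 */
    memory_region_add_subregion(get_system_memory(), 0x10000000, &example_mmio);
}
#endif
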
#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(section)) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + section_addr(section, addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

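/* Illustrative sketch (not part of the original file): the usual way callers
 * drive the routine above, via the cpu_physical_memory_read()/write()
 * wrappers, which simply pass is_write = 0 or 1.  The addresses and buffer
 * contents are made up.
 */
#if 0
static void example_copy_guest_word(target_phys_addr_t src,
                                    target_phys_addr_t dst)
{
    uint8_t buf[4];

    cpu_physical_memory_read(src, buf, sizeof(buf));    /* is_write = 0 */
    cpu_physical_memory_write(dst, buf, sizeof(buf));   /* is_write = 1 */
}
#endif
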
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!is_ram_rom_romd(section)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

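/* Illustrative sketch (not part of the original file): the retry pattern the
 * map-client list supports.  A device that fails to map a region (because the
 * single bounce buffer is busy) registers a callback and retries when
 * cpu_notify_map_clients() fires after the buffer is released.  The state
 * structure and function names are hypothetical.
 */
#if 0
typedef struct ExampleDMAState {
    void *map_client;
} ExampleDMAState;

static void example_dma_retry(void *opaque)
{
    ExampleDMAState *s = opaque;

    /* cpu_notify_map_clients() removes the registration itself, so just
       re-issue the cpu_physical_memory_map() call that failed earlier */
    s->map_client = NULL;
    /* ... retry the mapping here ... */
}

static void example_dma_start(ExampleDMAState *s)
{
    s->map_client = cpu_register_map_client(s, example_dma_retry);
}
#endif
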
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

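/* Illustrative sketch (not part of the original file): the canonical
 * map/use/unmap sequence for zero-copy DMA.  When the target is not plain
 * writable RAM, cpu_physical_memory_map() may hand back the single bounce
 * buffer and shrink *plen, so callers must honour the returned length.  The
 * address, data and length below are made up.
 */
#if 0
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        return; /* resources exhausted: register a map client and retry later */
    }
    memcpy(host, data, plen);                       /* plen may be < len */
    cpu_physical_memory_unmap(host, plen, 1, plen); /* marks the RAM dirty */
}
#endif
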
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

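/* Illustrative sketch (not part of the original file): the three wrappers
 * above differ only in the byte order they request from the internal helper.
 * The constants below are invented to show the relationship when the same
 * RAM bytes are read back with both orderings.
 */
#if 0
static void example_endian_loads(target_phys_addr_t addr)
{
    uint32_t le, be;

    stl_le_phys(addr, 0x11223344);   /* store in little-endian byte order */
    le = ldl_le_phys(addr);          /* reads back 0x11223344 */
    be = ldl_be_phys(addr);          /* same bytes, opposite order: 0x44332211 */
    (void)le;
    (void)be;
}
#endif
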
/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!is_ram_rom_romd(section)) {
        /* I/O case */
        addr = section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + section_addr(section, addr));
        stq_p(ptr, val);
    }
}

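/* Illustrative sketch (not part of the original file): when to prefer the
 * _notdirty stores above.  Software page-table walkers that set accessed or
 * dirty bits in guest PTEs use stl_phys_notdirty() so the write neither
 * invalidates translated code on that page nor marks it dirty for live
 * migration; an ordinary device write would use stl_phys() instead.  The PTE
 * bit and address below are invented.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                         /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);    /* no dirty marking, no TB flush */
}
#endif
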
/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

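/* Illustrative sketch (not part of the original file): how a debugger stub
 * reads guest virtual memory through cpu_memory_rw_debug() above.  A
 * non-zero return value means some page in the range was unmapped.  The
 * helper name is hypothetical.
 */
#if 0
static int example_debug_peek(CPUArchState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}
#endif
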
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

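/* Illustrative sketch (not part of the original file): dump_exec_info() takes
 * any fprintf-compatible callback, so dumping the TCG translation statistics
 * to stderr is a one-liner.  The wrapper name is hypothetical.
 */
#if 0
static void example_dump_tcg_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
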
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
#ifdef CONFIG_TCG_PASS_AREG0
        cpu_ldub_code(env1, addr);
#else
        ldub_code(addr);
#endif
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif