/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

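/* Worked example (illustrative, not from the original source): on a
   64-bit host with 4 KiB target pages (TARGET_PAGE_BITS == 12) and
   L1_MAP_ADDR_SPACE_BITS == 64, there are 52 index bits to cover.
   52 % 10 == 2, which is < 4, so V_L1_BITS == 12: the static L1 table
   below has V_L1_SIZE == 4096 entries, V_L1_SHIFT == 40, and four
   levels of 1024-entry tables hang underneath it. */
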
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

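/* A PhysPageEntry packs a tag and an index into 16 bits: is_leaf
   selects which array the 15-bit ptr field indexes, and
   PHYS_MAP_NODE_NIL (0x7fff, the largest 15-bit value) marks an
   unpopulated interior entry.  The packing caps the map at 32767
   nodes and sections. */
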
/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
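
/* How the recursion above carves up a range: at each level one entry
   covers `step` pages, so any step-aligned run of at least `step`
   pages is recorded as a leaf at that level and only the misaligned
   edges descend further.  At most the partially covered edge entries
   allocate new nodes per level, which is why reserving
   3 * P_L2_LEVELS nodes beforehand is a safe overestimate. */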

static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

static target_phys_addr_t section_addr(MemoryRegionSection *section,
                                       target_phys_addr_t addr)
{
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return addr;
}
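
/* Note that phys_page_find() never returns NULL: s_index starts out
   as phys_section_unassigned, so a miss at any level still yields a
   valid MemoryRegionSection.  Callers can therefore dereference the
   result unconditionally and inspect section->mr instead of testing
   for a missing mapping. */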

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
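
/* Sizing note: code_gen_buffer_max_size keeps TCG_MAX_OP_SIZE *
   OPC_BUF_SIZE bytes of headroom at the end of the buffer, so that
   translating one more TB can never overrun it.  tb_alloc() checks
   against this soft limit and tb_gen_code() responds to a failed
   allocation with a full tb_flush(). */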

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

TG
734/* Allocate a new translation block. Flush the translation buffer if
735 too many translation blocks or too much generated code. */
736static TranslationBlock *tb_alloc(target_ulong pc)
737{
738 TranslationBlock *tb;
739
740 if (nb_tbs >= code_gen_max_blocks ||
741 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
742 return NULL;
743 tb = &tbs[nb_tbs++];
744 tb->pc = pc;
745 tb->cflags = 0;
746 return tb;
747}
748
749void tb_free(TranslationBlock *tb)
750{
751 /* In practice this is mostly used for single use temporary TB
752 Ignore the hard cases and just back up if this TB happens to
753 be the last one generated. */
754 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
755 code_gen_ptr = tb->tc_ptr;
756 nb_tbs--;
757 }
758}
759
9fa3e853
FB
760static inline void invalidate_page_bitmap(PageDesc *p)
761{
762 if (p->code_bitmap) {
7267c094 763 g_free(p->code_bitmap);
9fa3e853
FB
764 p->code_bitmap = NULL;
765 }
766 p->code_write_count = 0;
767}
768
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
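
/* The page and jump lists tag the low two bits of each TranslationBlock
   pointer instead of storing a separate index: tag values 0 and 1 name
   the page_next[]/jmp_next[] slot to follow for the next hop, and tag
   value 2 terminates a circular jump list (see jmp_first).  The paired
   "& 3" / "& ~3" operations throughout this file decode that tag. */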

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
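
/* Worked example (illustrative): set_bits(tab, 3, 7) must set bits
   3..9.  start and end fall in different bytes, so the head byte gets
   mask 0xff << 3 == 0xf8 (bits 3-7), no full 0xff bytes follow, and
   the tail byte gets ~(0xff << (10 & 7)) == 0x03 (bits 8-9). */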

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   in range [start, end). NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB
   if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
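
/* Initializing jmp_first to (tb | 2) above creates an empty circular
   list: tag value 2 is the terminator, and the pointer part lets a
   traversal that starts from any member find its way back to the
   owning TB.  TBs that later chain to this one are pushed onto the
   list by the jump-chaining helpers in the exec headers. */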

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
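
/* The binary search is valid because tbs[] is filled in allocation
   order and code_gen_ptr only moves forward between flushes, so
   tc_ptr values increase strictly with the index.  When the loop ends
   without an exact hit, tbs[m_max] is the last TB starting at or
   before tc_ptr, i.e. the one whose generated code contains it. */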

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(target_phys_addr_t addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
        len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
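
/* Example of the sanity check (illustrative): for addr == 0x1003 and
   len == 4, len_mask == ~3, so addr & ~len_mask == 3 and the unaligned
   watchpoint is rejected with -EINVAL; addr == 0x1004 passes. */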

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
            && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

97ffbd8d 1723#ifndef CONFIG_USER_ONLY
3098dba0 1724/* mask must never be zero, except for A20 change call */
9349b4f9 1725static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1726{
1727 int old_mask;
be214e6c 1728
2e70f6ef 1729 old_mask = env->interrupt_request;
68a79315 1730 env->interrupt_request |= mask;
3098dba0 1731
8edac960
AL
1732 /*
1733 * If called from iothread context, wake the target cpu in
1734 * case it's halted.
1735 */
b7680cb6 1736 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1737 qemu_cpu_kick(env);
1738 return;
1739 }
8edac960 1740
2e70f6ef 1741 if (use_icount) {
266910c4 1742 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1743 if (!can_do_io(env)
be214e6c 1744 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1745 cpu_abort(env, "Raised interrupt while not in I/O function");
1746 }
2e70f6ef 1747 } else {
3098dba0 1748 cpu_unlink_tb(env);
ea041c0e
FB
1749 }
1750}
1751
ec6959d0
JK
1752CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1753
97ffbd8d
JK
1754#else /* CONFIG_USER_ONLY */
1755
9349b4f9 1756void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1757{
1758 env->interrupt_request |= mask;
1759 cpu_unlink_tb(env);
1760}
1761#endif /* CONFIG_USER_ONLY */
1762
9349b4f9 1763void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1764{
1765 env->interrupt_request &= ~mask;
1766}
1767
9349b4f9 1768void cpu_exit(CPUArchState *env)
3098dba0
AJ
1769{
1770 env->exit_request = 1;
1771 cpu_unlink_tb(env);
1772}
1773
c7cd6a37 1774const CPULogItem cpu_log_items[] = {
5fafdf24 1775 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1776 "show generated host assembly code for each compiled TB" },
1777 { CPU_LOG_TB_IN_ASM, "in_asm",
1778 "show target assembly code for each compiled TB" },
5fafdf24 1779 { CPU_LOG_TB_OP, "op",
57fec1fe 1780 "show micro ops for each compiled TB" },
f193c797 1781 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1782 "show micro ops "
1783#ifdef TARGET_I386
1784 "before eflags optimization and "
f193c797 1785#endif
e01a1157 1786 "after liveness analysis" },
f193c797
FB
1787 { CPU_LOG_INT, "int",
1788 "show interrupts/exceptions in short format" },
1789 { CPU_LOG_EXEC, "exec",
1790 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1791 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1792 "show CPU state before block translation" },
f193c797
FB
1793#ifdef TARGET_I386
1794 { CPU_LOG_PCALL, "pcall",
1795 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1796 { CPU_LOG_RESET, "cpu_reset",
1797 "show CPU state before CPU resets" },
f193c797 1798#endif
8e3a9fd2 1799#ifdef DEBUG_IOPORT
fd872598
FB
1800 { CPU_LOG_IOPORT, "ioport",
1801 "show all i/o ports accesses" },
8e3a9fd2 1802#endif
f193c797
FB
1803 { 0, NULL, NULL },
1804};
1805
1806static int cmp1(const char *s1, int n, const char *s2)
1807{
1808 if (strlen(s2) != n)
1809 return 0;
1810 return memcmp(s1, s2, n) == 0;
1811}
3b46e624 1812
f193c797
FB
1813/* takes a comma separated list of log masks. Return 0 if error. */
1814int cpu_str_to_log_mask(const char *str)
1815{
c7cd6a37 1816 const CPULogItem *item;
f193c797
FB
1817 int mask;
1818 const char *p, *p1;
1819
1820 p = str;
1821 mask = 0;
1822 for(;;) {
1823 p1 = strchr(p, ',');
1824 if (!p1)
1825 p1 = p + strlen(p);
9742bf26
YT
1826 if(cmp1(p,p1-p,"all")) {
1827 for(item = cpu_log_items; item->mask != 0; item++) {
1828 mask |= item->mask;
1829 }
1830 } else {
1831 for(item = cpu_log_items; item->mask != 0; item++) {
1832 if (cmp1(p, p1 - p, item->name))
1833 goto found;
1834 }
1835 return 0;
f193c797 1836 }
f193c797
FB
1837 found:
1838 mask |= item->mask;
1839 if (*p1 != ',')
1840 break;
1841 p = p1 + 1;
1842 }
1843 return mask;
1844}
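
/* Illustrative sketch, not part of the original file: wiring the two
   helpers above together, e.g. behind a "-d in_asm,int" style option.
   A return of 0 from cpu_str_to_log_mask() means an unknown item name;
   the function name is hypothetical. */
static void example_apply_log_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);

    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}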
ea041c0e 1845
9349b4f9 1846void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1847{
1848 va_list ap;
493ae1f0 1849 va_list ap2;
7501267e
FB
1850
1851 va_start(ap, fmt);
493ae1f0 1852 va_copy(ap2, ap);
7501267e
FB
1853 fprintf(stderr, "qemu: fatal: ");
1854 vfprintf(stderr, fmt, ap);
1855 fprintf(stderr, "\n");
1856#ifdef TARGET_I386
7fe48483
FB
1857 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1858#else
1859 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1860#endif
93fcfe39
AL
1861 if (qemu_log_enabled()) {
1862 qemu_log("qemu: fatal: ");
1863 qemu_log_vprintf(fmt, ap2);
1864 qemu_log("\n");
f9373291 1865#ifdef TARGET_I386
93fcfe39 1866 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1867#else
93fcfe39 1868 log_cpu_state(env, 0);
f9373291 1869#endif
31b1a7b4 1870 qemu_log_flush();
93fcfe39 1871 qemu_log_close();
924edcae 1872 }
493ae1f0 1873 va_end(ap2);
f9373291 1874 va_end(ap);
fd052bf6
RV
1875#if defined(CONFIG_USER_ONLY)
1876 {
1877 struct sigaction act;
1878 sigfillset(&act.sa_mask);
1879 act.sa_handler = SIG_DFL;
1880 sigaction(SIGABRT, &act, NULL);
1881 }
1882#endif
7501267e
FB
1883 abort();
1884}
1885
9349b4f9 1886CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1887{
9349b4f9
AF
1888 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1889 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1890 int cpu_index = new_env->cpu_index;
5a38f081
AL
1891#if defined(TARGET_HAS_ICE)
1892 CPUBreakpoint *bp;
1893 CPUWatchpoint *wp;
1894#endif
1895
9349b4f9 1896 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1897
1898 /* Preserve chaining and index. */
c5be9f08
TS
1899 new_env->next_cpu = next_cpu;
1900 new_env->cpu_index = cpu_index;
5a38f081
AL
1901
1902 /* Clone all break/watchpoints.
1903 Note: Once we support ptrace with hw-debug register access, make sure
1904 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1905 QTAILQ_INIT(&env->breakpoints);
1906 QTAILQ_INIT(&env->watchpoints);
5a38f081 1907#if defined(TARGET_HAS_ICE)
72cf2d4f 1908 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1909 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1910 }
72cf2d4f 1911 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1912 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1913 wp->flags, NULL);
1914 }
1915#endif
1916
c5be9f08
TS
1917 return new_env;
1918}
1919
0124311e
FB
1920#if !defined(CONFIG_USER_ONLY)
1921
9349b4f9 1922static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1923{
1924 unsigned int i;
1925
1926 /* Discard jump cache entries for any tb which might potentially
1927 overlap the flushed page. */
1928 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1929 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1930 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1931
1932 i = tb_jmp_cache_hash_page(addr);
1933 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1934 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1935}
1936
08738984
IK
1937static CPUTLBEntry s_cputlb_empty_entry = {
1938 .addr_read = -1,
1939 .addr_write = -1,
1940 .addr_code = -1,
1941 .addend = -1,
1942};
1943
771124e1
PM
1944/* NOTE:
1945 * If flush_global is true (the usual case), flush all tlb entries.
1946 * If flush_global is false, flush (at least) all tlb entries not
1947 * marked global.
1948 *
1949 * Since QEMU doesn't currently implement a global/not-global flag
1950 * for tlb entries, at the moment tlb_flush() will also flush all
1951 * tlb entries in the flush_global == false case. This is OK because
1952 * CPU architectures generally permit an implementation to drop
1953 * entries from the TLB at any time, so flushing more entries than
1954 * required is only an efficiency issue, not a correctness issue.
1955 */
9349b4f9 1956void tlb_flush(CPUArchState *env, int flush_global)
33417e70 1957{
33417e70 1958 int i;
0124311e 1959
9fa3e853
FB
1960#if defined(DEBUG_TLB)
1961 printf("tlb_flush:\n");
1962#endif
0124311e
FB
1963 /* must reset current TB so that interrupts cannot modify the
1964 links while we are modifying them */
1965 env->current_tb = NULL;
1966
33417e70 1967 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1968 int mmu_idx;
1969 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1970 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1971 }
33417e70 1972 }
9fa3e853 1973
8a40a180 1974 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1975
d4c430a8
PB
1976 env->tlb_flush_addr = -1;
1977 env->tlb_flush_mask = 0;
e3db7226 1978 tlb_flush_count++;
33417e70
FB
1979}
1980
274da6b2 1981static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1982{
5fafdf24 1983 if (addr == (tlb_entry->addr_read &
84b7b8e7 1984 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1985 addr == (tlb_entry->addr_write &
84b7b8e7 1986 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1987 addr == (tlb_entry->addr_code &
84b7b8e7 1988 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1989 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1990 }
61382a50
FB
1991}
1992
9349b4f9 1993void tlb_flush_page(CPUArchState *env, target_ulong addr)
33417e70 1994{
8a40a180 1995 int i;
cfde4bd9 1996 int mmu_idx;
0124311e 1997
9fa3e853 1998#if defined(DEBUG_TLB)
108c49b8 1999 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2000#endif
d4c430a8
PB
2001 /* Check if we need to flush due to large pages. */
2002 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2003#if defined(DEBUG_TLB)
2004 printf("tlb_flush_page: forced full flush ("
2005 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2006 env->tlb_flush_addr, env->tlb_flush_mask);
2007#endif
2008 tlb_flush(env, 1);
2009 return;
2010 }
0124311e
FB
2011 /* must reset current TB so that interrupts cannot modify the
2012 links while we are modifying them */
2013 env->current_tb = NULL;
61382a50
FB
2014
2015 addr &= TARGET_PAGE_MASK;
2016 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2017 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2018 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2019
5c751e99 2020 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2021}
2022
9fa3e853
FB
2023/* update the TLBs so that writes to code in the virtual page 'addr'
2024 can be detected */
c227f099 2025static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2026{
5fafdf24 2027 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2028 ram_addr + TARGET_PAGE_SIZE,
2029 CODE_DIRTY_FLAG);
9fa3e853
FB
2030}
2031
9fa3e853 2032/* update the TLB so that writes in physical page 'ram_addr' are no longer
3a7d929e 2033 tested for self-modifying code */
9349b4f9 2034static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 2035 target_ulong vaddr)
9fa3e853 2036{
f7c11b53 2037 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2038}
2039
7859cc6e
AK
2040static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2041{
2042 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2043}
2044
5fafdf24 2045static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
8efe0ca8 2046 uintptr_t start, uintptr_t length)
1ccde1cb 2047{
8efe0ca8 2048 uintptr_t addr;
7859cc6e 2049 if (tlb_is_dirty_ram(tlb_entry)) {
84b7b8e7 2050 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2051 if ((addr - start) < length) {
7859cc6e 2052 tlb_entry->addr_write |= TLB_NOTDIRTY;
1ccde1cb
FB
2053 }
2054 }
2055}
2056
5579c7f3 2057/* Note: start and end must be within the same ram block. */
c227f099 2058void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2059 int dirty_flags)
1ccde1cb 2060{
9349b4f9 2061 CPUArchState *env;
8efe0ca8 2062 uintptr_t length, start1;
f7c11b53 2063 int i;
1ccde1cb
FB
2064
2065 start &= TARGET_PAGE_MASK;
2066 end = TARGET_PAGE_ALIGN(end);
2067
2068 length = end - start;
2069 if (length == 0)
2070 return;
f7c11b53 2071 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2072
1ccde1cb
FB
2073 /* we modify the TLB cache so that the dirty bit will be set again
2074 when accessing the range */
8efe0ca8 2075 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
a57d23e4 2076 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2077 address comparisons below. */
8efe0ca8 2078 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2079 != (end - 1) - start) {
2080 abort();
2081 }
2082
6a00d601 2083 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2084 int mmu_idx;
2085 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2086 for(i = 0; i < CPU_TLB_SIZE; i++)
2087 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2088 start1, length);
2089 }
6a00d601 2090 }
1ccde1cb
FB
2091}
2092
74576198
AL
2093int cpu_physical_memory_set_dirty_tracking(int enable)
2094{
f6f3fbca 2095 int ret = 0;
74576198 2096 in_migration = enable;
f6f3fbca 2097 return ret;
74576198
AL
2098}
2099
3a7d929e
FB
2100static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2101{
c227f099 2102 ram_addr_t ram_addr;
5579c7f3 2103 void *p;
3a7d929e 2104
7859cc6e 2105 if (tlb_is_dirty_ram(tlb_entry)) {
8efe0ca8 2106 p = (void *)(uintptr_t)((tlb_entry->addr_write & TARGET_PAGE_MASK)
5579c7f3 2107 + tlb_entry->addend);
e890261f 2108 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2109 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2110 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2111 }
2112 }
2113}
2114
2115/* update the TLB according to the current state of the dirty bits */
9349b4f9 2116void cpu_tlb_update_dirty(CPUArchState *env)
3a7d929e
FB
2117{
2118 int i;
cfde4bd9
IY
2119 int mmu_idx;
2120 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2121 for(i = 0; i < CPU_TLB_SIZE; i++)
2122 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2123 }
3a7d929e
FB
2124}
2125
0f459d16 2126static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2127{
0f459d16
PB
2128 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2129 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2130}
2131
0f459d16
PB
2132/* update the TLB corresponding to virtual page vaddr
2133 so that it is no longer dirty */
9349b4f9 2134static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
1ccde1cb 2135{
1ccde1cb 2136 int i;
cfde4bd9 2137 int mmu_idx;
1ccde1cb 2138
0f459d16 2139 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2140 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2141 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2142 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2143}
2144
d4c430a8
PB
2145/* Our TLB does not support large pages, so remember the area covered by
2146 large pages and trigger a full TLB flush if these are invalidated. */
9349b4f9 2147static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2148 target_ulong size)
2149{
2150 target_ulong mask = ~(size - 1);
2151
2152 if (env->tlb_flush_addr == (target_ulong)-1) {
2153 env->tlb_flush_addr = vaddr & mask;
2154 env->tlb_flush_mask = mask;
2155 return;
2156 }
2157 /* Extend the existing region to include the new page.
2158 This is a compromise between unnecessary flushes and the cost
2159 of maintaining a full variable size TLB. */
2160 mask &= env->tlb_flush_mask;
2161 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2162 mask <<= 1;
2163 }
2164 env->tlb_flush_addr &= mask;
2165 env->tlb_flush_mask = mask;
2166}
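
/* Worked example of the mask arithmetic above (illustration only).
   Suppose a 1 MB page at 0x00100000 was recorded first:
       tlb_flush_addr = 0x00100000, tlb_flush_mask = 0xfff00000.
   Adding another 1 MB page at 0x00700000 shifts the shared mask left
   until both pages fall in one region: 0x00100000 ^ 0x00700000 =
   0x00600000, so the mask widens to 0xff800000 and the recorded
   region becomes [0x00000000, 0x007fffff].  Any tlb_flush_page()
   inside that range now triggers a full flush. */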
2167
06ef3525 2168static bool is_ram_rom(MemoryRegionSection *s)
1d393fa2 2169{
06ef3525 2170 return memory_region_is_ram(s->mr);
1d393fa2
AK
2171}
2172
06ef3525 2173static bool is_romd(MemoryRegionSection *s)
75c578dc 2174{
06ef3525 2175 MemoryRegion *mr = s->mr;
75c578dc 2176
75c578dc
AK
2177 return mr->rom_device && mr->readable;
2178}
2179
06ef3525 2180static bool is_ram_rom_romd(MemoryRegionSection *s)
1d393fa2 2181{
06ef3525 2182 return is_ram_rom(s) || is_romd(s);
1d393fa2
AK
2183}
2184
d4c430a8
PB
2185/* Add a new TLB entry. At most one entry for a given virtual address
2186 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2187 supplied size is only used by tlb_flush_page. */
9349b4f9 2188void tlb_set_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2189 target_phys_addr_t paddr, int prot,
2190 int mmu_idx, target_ulong size)
9fa3e853 2191{
f3705d53 2192 MemoryRegionSection *section;
9fa3e853 2193 unsigned int index;
4f2ac237 2194 target_ulong address;
0f459d16 2195 target_ulong code_address;
8efe0ca8 2196 uintptr_t addend;
84b7b8e7 2197 CPUTLBEntry *te;
a1d1bb31 2198 CPUWatchpoint *wp;
c227f099 2199 target_phys_addr_t iotlb;
9fa3e853 2200
d4c430a8
PB
2201 assert(size >= TARGET_PAGE_SIZE);
2202 if (size != TARGET_PAGE_SIZE) {
2203 tlb_add_large_page(env, vaddr, size);
2204 }
06ef3525 2205 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853 2206#if defined(DEBUG_TLB)
7fd3f494
SW
2207 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2208 " prot=%x idx=%d pd=0x%08lx\n",
2209 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2210#endif
2211
0f459d16 2212 address = vaddr;
f3705d53 2213 if (!is_ram_rom_romd(section)) {
0f459d16
PB
2214 /* IO memory case (romd handled later) */
2215 address |= TLB_MMIO;
2216 }
f3705d53 2217 if (is_ram_rom_romd(section)) {
8efe0ca8 2218 addend = (uintptr_t)memory_region_get_ram_ptr(section->mr)
f3705d53 2219 + section_addr(section, paddr);
06ef3525
AK
2220 } else {
2221 addend = 0;
2222 }
f3705d53 2223 if (is_ram_rom(section)) {
0f459d16 2224 /* Normal RAM. */
f3705d53
AK
2225 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2226 + section_addr(section, paddr);
2227 if (!section->readonly)
aa102231 2228 iotlb |= phys_section_notdirty;
0f459d16 2229 else
aa102231 2230 iotlb |= phys_section_rom;
0f459d16 2231 } else {
ccbb4d44 2232 /* IO handlers are currently passed a physical address.
0f459d16
PB
2233 It would be nice to pass an offset from the base address
2234 of that region. This would avoid having to special case RAM,
2235 and avoid full address decoding in every device.
2236 We can't use the high bits of pd for this because
2237 IO_MEM_ROMD uses these as a ram address. */
aa102231 2238 iotlb = section - phys_sections;
f3705d53 2239 iotlb += section_addr(section, paddr);
0f459d16
PB
2240 }
2241
2242 code_address = address;
2243 /* Make accesses to pages with watchpoints go via the
2244 watchpoint trap routines. */
72cf2d4f 2245 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2246 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2247 /* Avoid trapping reads of pages with a write breakpoint. */
2248 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
aa102231 2249 iotlb = phys_section_watch + paddr;
bf298f83
JK
2250 address |= TLB_MMIO;
2251 break;
2252 }
6658ffb8 2253 }
0f459d16 2254 }
d79acba4 2255
0f459d16
PB
2256 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2257 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2258 te = &env->tlb_table[mmu_idx][index];
2259 te->addend = addend - vaddr;
2260 if (prot & PAGE_READ) {
2261 te->addr_read = address;
2262 } else {
2263 te->addr_read = -1;
2264 }
5c751e99 2265
0f459d16
PB
2266 if (prot & PAGE_EXEC) {
2267 te->addr_code = code_address;
2268 } else {
2269 te->addr_code = -1;
2270 }
2271 if (prot & PAGE_WRITE) {
f3705d53
AK
2272 if ((memory_region_is_ram(section->mr) && section->readonly)
2273 || is_romd(section)) {
0f459d16
PB
2274 /* Write access calls the I/O callback. */
2275 te->addr_write = address | TLB_MMIO;
f3705d53 2276 } else if (memory_region_is_ram(section->mr)
06ef3525 2277 && !cpu_physical_memory_is_dirty(
f3705d53
AK
2278 section->mr->ram_addr
2279 + section_addr(section, paddr))) {
0f459d16 2280 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2281 } else {
0f459d16 2282 te->addr_write = address;
9fa3e853 2283 }
0f459d16
PB
2284 } else {
2285 te->addr_write = -1;
9fa3e853 2286 }
9fa3e853
FB
2287}
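
/* Illustrative sketch, not part of the original file: the shape of a
   target's tlb_fill()-style TLB miss handler, the usual caller of
   tlb_set_page().  The page table walk is faked as identity-mapped;
   the function name and walk are hypothetical. */
static void example_tlb_miss(CPUArchState *env, target_ulong vaddr,
                             int mmu_idx)
{
    /* A real target would walk its page tables here. */
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);
}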
2288
0124311e
FB
2289#else
2290
9349b4f9 2291void tlb_flush(CPUArchState *env, int flush_global)
0124311e
FB
2292{
2293}
2294
9349b4f9 2295void tlb_flush_page(CPUArchState *env, target_ulong addr)
0124311e
FB
2296{
2297}
2298
edf8e2af
MW
2299/*
2300 * Walks guest process memory "regions" one by one
2301 * and calls callback function 'fn' for each region.
2302 */
5cd2c5b6
RH
2303
2304struct walk_memory_regions_data
2305{
2306 walk_memory_regions_fn fn;
2307 void *priv;
8efe0ca8 2308 uintptr_t start;
5cd2c5b6
RH
2309 int prot;
2310};
2311
2312static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2313 abi_ulong end, int new_prot)
5cd2c5b6
RH
2314{
2315 if (data->start != -1ul) {
2316 int rc = data->fn(data->priv, data->start, end, data->prot);
2317 if (rc != 0) {
2318 return rc;
2319 }
2320 }
2321
2322 data->start = (new_prot ? end : -1ul);
2323 data->prot = new_prot;
2324
2325 return 0;
2326}
2327
2328static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2329 abi_ulong base, int level, void **lp)
5cd2c5b6 2330{
b480d9b7 2331 abi_ulong pa;
5cd2c5b6
RH
2332 int i, rc;
2333
2334 if (*lp == NULL) {
2335 return walk_memory_regions_end(data, base, 0);
2336 }
2337
2338 if (level == 0) {
2339 PageDesc *pd = *lp;
7296abac 2340 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2341 int prot = pd[i].flags;
2342
2343 pa = base | (i << TARGET_PAGE_BITS);
2344 if (prot != data->prot) {
2345 rc = walk_memory_regions_end(data, pa, prot);
2346 if (rc != 0) {
2347 return rc;
9fa3e853 2348 }
9fa3e853 2349 }
5cd2c5b6
RH
2350 }
2351 } else {
2352 void **pp = *lp;
7296abac 2353 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2354 pa = base | ((abi_ulong)i <<
2355 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2356 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2357 if (rc != 0) {
2358 return rc;
2359 }
2360 }
2361 }
2362
2363 return 0;
2364}
2365
2366int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2367{
2368 struct walk_memory_regions_data data;
8efe0ca8 2369 uintptr_t i;
5cd2c5b6
RH
2370
2371 data.fn = fn;
2372 data.priv = priv;
2373 data.start = -1ul;
2374 data.prot = 0;
2375
2376 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2377 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2378 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2379 if (rc != 0) {
2380 return rc;
9fa3e853 2381 }
33417e70 2382 }
5cd2c5b6
RH
2383
2384 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2385}
2386
b480d9b7
PB
2387static int dump_region(void *priv, abi_ulong start,
2388 abi_ulong end, unsigned long prot)
edf8e2af
MW
2389{
2390 FILE *f = (FILE *)priv;
2391
b480d9b7
PB
2392 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2393 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2394 start, end, end - start,
2395 ((prot & PAGE_READ) ? 'r' : '-'),
2396 ((prot & PAGE_WRITE) ? 'w' : '-'),
2397 ((prot & PAGE_EXEC) ? 'x' : '-'));
2398
2399 return (0);
2400}
2401
2402/* dump memory mappings */
2403void page_dump(FILE *f)
2404{
2405 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2406 "start", "end", "size", "prot");
2407 walk_memory_regions(f, dump_region);
33417e70
FB
2408}
2409
53a5960a 2410int page_get_flags(target_ulong address)
33417e70 2411{
9fa3e853
FB
2412 PageDesc *p;
2413
2414 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2415 if (!p)
9fa3e853
FB
2416 return 0;
2417 return p->flags;
2418}
2419
376a7909
RH
2420/* Modify the flags of a page and invalidate the code if necessary.
2421 The flag PAGE_WRITE_ORG is positioned automatically depending
2422 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2423void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2424{
376a7909
RH
2425 target_ulong addr, len;
2426
2427 /* This function should never be called with addresses outside the
2428 guest address space. If this assert fires, it probably indicates
2429 a missing call to h2g_valid. */
b480d9b7
PB
2430#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2431 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2432#endif
2433 assert(start < end);
9fa3e853
FB
2434
2435 start = start & TARGET_PAGE_MASK;
2436 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2437
2438 if (flags & PAGE_WRITE) {
9fa3e853 2439 flags |= PAGE_WRITE_ORG;
376a7909
RH
2440 }
2441
2442 for (addr = start, len = end - start;
2443 len != 0;
2444 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2445 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2446
2447 /* If the write protection bit is set, then we invalidate
2448 the code inside. */
5fafdf24 2449 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2450 (flags & PAGE_WRITE) &&
2451 p->first_tb) {
d720b93d 2452 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2453 }
2454 p->flags = flags;
2455 }
33417e70
FB
2456}
2457
3d97b40b
TS
2458int page_check_range(target_ulong start, target_ulong len, int flags)
2459{
2460 PageDesc *p;
2461 target_ulong end;
2462 target_ulong addr;
2463
376a7909
RH
2464 /* This function should never be called with addresses outside the
2465 guest address space. If this assert fires, it probably indicates
2466 a missing call to h2g_valid. */
338e9e6c
BS
2467#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2468 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2469#endif
2470
3e0650a9
RH
2471 if (len == 0) {
2472 return 0;
2473 }
376a7909
RH
2474 if (start + len - 1 < start) {
2475 /* We've wrapped around. */
55f280c9 2476 return -1;
376a7909 2477 }
55f280c9 2478
3d97b40b
TS
2479 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2480 start = start & TARGET_PAGE_MASK;
2481
376a7909
RH
2482 for (addr = start, len = end - start;
2483 len != 0;
2484 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2485 p = page_find(addr >> TARGET_PAGE_BITS);
2486 if( !p )
2487 return -1;
2488 if( !(p->flags & PAGE_VALID) )
2489 return -1;
2490
dae3270c 2491 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2492 return -1;
dae3270c
FB
2493 if (flags & PAGE_WRITE) {
2494 if (!(p->flags & PAGE_WRITE_ORG))
2495 return -1;
2496 /* unprotect the page if it was put read-only because it
2497 contains translated code */
2498 if (!(p->flags & PAGE_WRITE)) {
2499 if (!page_unprotect(addr, 0, NULL))
2500 return -1;
2501 }
2502 return 0;
2503 }
3d97b40b
TS
2504 }
2505 return 0;
2506}
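
/* Illustrative sketch, not part of the original file: guarding a guest
   buffer access the way a syscall emulation path might.  The helper
   name and the -1 error convention are made up. */
static int example_validate_guest_buffer(target_ulong addr, target_ulong len)
{
    if (page_check_range(addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1; /* linux-user code would return -TARGET_EFAULT */
    }
    return 0;
}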
2507
9fa3e853 2508/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2509 page. Return TRUE if the fault was successfully handled. */
6375e09e 2510int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
9fa3e853 2511{
45d679d6
AJ
2512 unsigned int prot;
2513 PageDesc *p;
53a5960a 2514 target_ulong host_start, host_end, addr;
9fa3e853 2515
c8a706fe
PB
2516 /* Technically this isn't safe inside a signal handler. However we
2517 know this only ever happens in a synchronous SEGV handler, so in
2518 practice it seems to be ok. */
2519 mmap_lock();
2520
45d679d6
AJ
2521 p = page_find(address >> TARGET_PAGE_BITS);
2522 if (!p) {
c8a706fe 2523 mmap_unlock();
9fa3e853 2524 return 0;
c8a706fe 2525 }
45d679d6 2526
9fa3e853
FB
2527 /* if the page was really writable, then we change its
2528 protection back to writable */
45d679d6
AJ
2529 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2530 host_start = address & qemu_host_page_mask;
2531 host_end = host_start + qemu_host_page_size;
2532
2533 prot = 0;
2534 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2535 p = page_find(addr >> TARGET_PAGE_BITS);
2536 p->flags |= PAGE_WRITE;
2537 prot |= p->flags;
2538
9fa3e853
FB
2539 /* and since the content will be modified, we must invalidate
2540 the corresponding translated code. */
45d679d6 2541 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2542#ifdef DEBUG_TB_CHECK
45d679d6 2543 tb_invalidate_check(addr);
9fa3e853 2544#endif
9fa3e853 2545 }
45d679d6
AJ
2546 mprotect((void *)g2h(host_start), qemu_host_page_size,
2547 prot & PAGE_BITS);
2548
2549 mmap_unlock();
2550 return 1;
9fa3e853 2551 }
c8a706fe 2552 mmap_unlock();
9fa3e853
FB
2553 return 0;
2554}
2555
9349b4f9 2556static inline void tlb_set_dirty(CPUArchState *env,
8efe0ca8 2557 uintptr_t addr, target_ulong vaddr)
1ccde1cb
FB
2558{
2559}
9fa3e853
FB
2560#endif /* defined(CONFIG_USER_ONLY) */
2561
e2eef170 2562#if !defined(CONFIG_USER_ONLY)
8da3ff18 2563
c04b2b78
PB
2564#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2565typedef struct subpage_t {
70c68e44 2566 MemoryRegion iomem;
c04b2b78 2567 target_phys_addr_t base;
5312bd8b 2568 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2569} subpage_t;
2570
c227f099 2571static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2572 uint16_t section);
0f0cb164 2573static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2574static void destroy_page_desc(uint16_t section_index)
54688b1e 2575{
5312bd8b
AK
2576 MemoryRegionSection *section = &phys_sections[section_index];
2577 MemoryRegion *mr = section->mr;
54688b1e
AK
2578
2579 if (mr->subpage) {
2580 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2581 memory_region_destroy(&subpage->iomem);
2582 g_free(subpage);
2583 }
2584}
2585
4346ae3e 2586static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2587{
2588 unsigned i;
d6f2ea22 2589 PhysPageEntry *p;
54688b1e 2590
c19e8800 2591 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2592 return;
2593 }
2594
c19e8800 2595 p = phys_map_nodes[lp->ptr];
4346ae3e 2596 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2597 if (!p[i].is_leaf) {
54688b1e 2598 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2599 } else {
c19e8800 2600 destroy_page_desc(p[i].ptr);
54688b1e 2601 }
54688b1e 2602 }
07f07b31 2603 lp->is_leaf = 0;
c19e8800 2604 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2605}
2606
2607static void destroy_all_mappings(void)
2608{
3eef53df 2609 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2610 phys_map_nodes_reset();
54688b1e
AK
2611}
2612
5312bd8b
AK
2613static uint16_t phys_section_add(MemoryRegionSection *section)
2614{
2615 if (phys_sections_nb == phys_sections_nb_alloc) {
2616 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2617 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2618 phys_sections_nb_alloc);
2619 }
2620 phys_sections[phys_sections_nb] = *section;
2621 return phys_sections_nb++;
2622}
2623
2624static void phys_sections_clear(void)
2625{
2626 phys_sections_nb = 0;
2627}
2628
8f2498f9
MT
2629/* register physical memory.
2630 A MemoryRegionSection is split into up to three pieces: a partial
2631 leading page, a run of whole target pages, and a partial trailing
2632 page. Whole pages are entered directly into the physical page map
2633 with register_multipage(); partial pages are routed through
2634 register_subpage() so that several sections can share a single
2635 target page. For RAM, a section's size therefore need not be a
2636 multiple of the target page size. */
0f0cb164
AK
2637static void register_subpage(MemoryRegionSection *section)
2638{
2639 subpage_t *subpage;
2640 target_phys_addr_t base = section->offset_within_address_space
2641 & TARGET_PAGE_MASK;
f3705d53 2642 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2643 MemoryRegionSection subsection = {
2644 .offset_within_address_space = base,
2645 .size = TARGET_PAGE_SIZE,
2646 };
0f0cb164
AK
2647 target_phys_addr_t start, end;
2648
f3705d53 2649 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2650
f3705d53 2651 if (!(existing->mr->subpage)) {
0f0cb164
AK
2652 subpage = subpage_init(base);
2653 subsection.mr = &subpage->iomem;
2999097b
AK
2654 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2655 phys_section_add(&subsection));
0f0cb164 2656 } else {
f3705d53 2657 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2658 }
2659 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2660 end = start + section->size;
2661 subpage_register(subpage, start, end, phys_section_add(section));
2662}
2663
2664
2665static void register_multipage(MemoryRegionSection *section)
33417e70 2666{
dd81124b
AK
2667 target_phys_addr_t start_addr = section->offset_within_address_space;
2668 ram_addr_t size = section->size;
2999097b 2669 target_phys_addr_t addr;
5312bd8b 2670 uint16_t section_index = phys_section_add(section);
dd81124b 2671
3b8e6a2d 2672 assert(size);
f6f3fbca 2673
3b8e6a2d 2674 addr = start_addr;
2999097b
AK
2675 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2676 section_index);
33417e70
FB
2677}
2678
0f0cb164
AK
2679void cpu_register_physical_memory_log(MemoryRegionSection *section,
2680 bool readonly)
2681{
2682 MemoryRegionSection now = *section, remain = *section;
2683
2684 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2685 || (now.size < TARGET_PAGE_SIZE)) {
2686 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2687 - now.offset_within_address_space,
2688 now.size);
2689 register_subpage(&now);
2690 remain.size -= now.size;
2691 remain.offset_within_address_space += now.size;
2692 remain.offset_within_region += now.size;
2693 }
2694 now = remain;
2695 now.size &= TARGET_PAGE_MASK;
2696 if (now.size) {
2697 register_multipage(&now);
2698 remain.size -= now.size;
2699 remain.offset_within_address_space += now.size;
2700 remain.offset_within_region += now.size;
2701 }
2702 now = remain;
2703 if (now.size) {
2704 register_subpage(&now);
2705 }
2706}
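
/* Worked example of the splitting above (illustration only).  With
   4 KB target pages, a section at address 0x1800 of size 0x2900 is
   registered in three steps:
     1. head subpage  [0x1800, 0x1fff]  via register_subpage()
     2. whole pages   [0x2000, 0x3fff]  via register_multipage()
     3. tail subpage  [0x4000, 0x40ff]  via register_subpage()
   A page-aligned section whose size is a page multiple takes only
   step 2. */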
2707
2708
c227f099 2709void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2710{
2711 if (kvm_enabled())
2712 kvm_coalesce_mmio_region(addr, size);
2713}
2714
c227f099 2715void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2716{
2717 if (kvm_enabled())
2718 kvm_uncoalesce_mmio_region(addr, size);
2719}
2720
62a2744c
SY
2721void qemu_flush_coalesced_mmio_buffer(void)
2722{
2723 if (kvm_enabled())
2724 kvm_flush_coalesced_mmio_buffer();
2725}
2726
c902760f
MT
2727#if defined(__linux__) && !defined(TARGET_S390X)
2728
2729#include <sys/vfs.h>
2730
2731#define HUGETLBFS_MAGIC 0x958458f6
2732
2733static long gethugepagesize(const char *path)
2734{
2735 struct statfs fs;
2736 int ret;
2737
2738 do {
9742bf26 2739 ret = statfs(path, &fs);
c902760f
MT
2740 } while (ret != 0 && errno == EINTR);
2741
2742 if (ret != 0) {
9742bf26
YT
2743 perror(path);
2744 return 0;
c902760f
MT
2745 }
2746
2747 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2748 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2749
2750 return fs.f_bsize;
2751}
2752
04b16653
AW
2753static void *file_ram_alloc(RAMBlock *block,
2754 ram_addr_t memory,
2755 const char *path)
c902760f
MT
2756{
2757 char *filename;
2758 void *area;
2759 int fd;
2760#ifdef MAP_POPULATE
2761 int flags;
2762#endif
2763 unsigned long hpagesize;
2764
2765 hpagesize = gethugepagesize(path);
2766 if (!hpagesize) {
9742bf26 2767 return NULL;
c902760f
MT
2768 }
2769
2770 if (memory < hpagesize) {
2771 return NULL;
2772 }
2773
2774 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2775 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2776 return NULL;
2777 }
2778
2779 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2780 return NULL;
c902760f
MT
2781 }
2782
2783 fd = mkstemp(filename);
2784 if (fd < 0) {
9742bf26
YT
2785 perror("unable to create backing store for hugepages");
2786 free(filename);
2787 return NULL;
c902760f
MT
2788 }
2789 unlink(filename);
2790 free(filename);
2791
2792 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2793
2794 /*
2795 * ftruncate is not supported by hugetlbfs in older
2796 * hosts, so don't bother bailing out on errors.
2797 * If anything goes wrong with it under other filesystems,
2798 * mmap will fail.
2799 */
2800 if (ftruncate(fd, memory))
9742bf26 2801 perror("ftruncate");
c902760f
MT
2802
2803#ifdef MAP_POPULATE
2804 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2805 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2806 * to sidestep this quirk.
2807 */
2808 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2809 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2810#else
2811 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2812#endif
2813 if (area == MAP_FAILED) {
9742bf26
YT
2814 perror("file_ram_alloc: can't mmap RAM pages");
2815 close(fd);
2816 return (NULL);
c902760f 2817 }
04b16653 2818 block->fd = fd;
c902760f
MT
2819 return area;
2820}
2821#endif
2822
d17b5288 2823static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2824{
2825 RAMBlock *block, *next_block;
3e837b2c 2826 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2827
2828 if (QLIST_EMPTY(&ram_list.blocks))
2829 return 0;
2830
2831 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2832 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2833
2834 end = block->offset + block->length;
2835
2836 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2837 if (next_block->offset >= end) {
2838 next = MIN(next, next_block->offset);
2839 }
2840 }
2841 if (next - end >= size && next - end < mingap) {
3e837b2c 2842 offset = end;
04b16653
AW
2843 mingap = next - end;
2844 }
2845 }
3e837b2c
AW
2846
2847 if (offset == RAM_ADDR_MAX) {
2848 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2849 (uint64_t)size);
2850 abort();
2851 }
2852
04b16653
AW
2853 return offset;
2854}
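
/* Worked example for find_ram_offset() (illustration only).  With
   blocks occupying [0x0, 0x100000) and [0x300000, 0x400000), a request
   for 0x100000 bytes sees two candidate gaps: [0x100000, 0x300000) and
   everything above 0x400000.  The first gap is large enough and is the
   smallest such gap, so the new block is placed at offset 0x100000. */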
2855
2856static ram_addr_t last_ram_offset(void)
d17b5288
AW
2857{
2858 RAMBlock *block;
2859 ram_addr_t last = 0;
2860
2861 QLIST_FOREACH(block, &ram_list.blocks, next)
2862 last = MAX(last, block->offset + block->length);
2863
2864 return last;
2865}
2866
c5705a77 2867void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2868{
2869 RAMBlock *new_block, *block;
2870
c5705a77
AK
2871 new_block = NULL;
2872 QLIST_FOREACH(block, &ram_list.blocks, next) {
2873 if (block->offset == addr) {
2874 new_block = block;
2875 break;
2876 }
2877 }
2878 assert(new_block);
2879 assert(!new_block->idstr[0]);
84b89d78
CM
2880
2881 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2882 char *id = dev->parent_bus->info->get_dev_path(dev);
2883 if (id) {
2884 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2885 g_free(id);
84b89d78
CM
2886 }
2887 }
2888 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2889
2890 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2891 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2892 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2893 new_block->idstr);
2894 abort();
2895 }
2896 }
c5705a77
AK
2897}
2898
2899ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2900 MemoryRegion *mr)
2901{
2902 RAMBlock *new_block;
2903
2904 size = TARGET_PAGE_ALIGN(size);
2905 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2906
7c637366 2907 new_block->mr = mr;
432d268c 2908 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2909 if (host) {
2910 new_block->host = host;
cd19cfa2 2911 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2912 } else {
2913 if (mem_path) {
c902760f 2914#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2915 new_block->host = file_ram_alloc(new_block, size, mem_path);
2916 if (!new_block->host) {
2917 new_block->host = qemu_vmalloc(size);
e78815a5 2918 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2919 }
c902760f 2920#else
6977dfe6
YT
2921 fprintf(stderr, "-mem-path option unsupported\n");
2922 exit(1);
c902760f 2923#endif
6977dfe6 2924 } else {
6b02494d 2925#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2926 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2927 a system-defined value, which is at least 256GB. Larger systems
2928 have larger values. We put the guest between the end of the data
2929 segment (system break) and this value. We use 32GB as a base to
2930 have enough room for the system break to grow. */
2931 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2932 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2933 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2934 if (new_block->host == MAP_FAILED) {
2935 fprintf(stderr, "Allocating RAM failed\n");
2936 abort();
2937 }
6b02494d 2938#else
868bb33f 2939 if (xen_enabled()) {
fce537d4 2940 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2941 } else {
2942 new_block->host = qemu_vmalloc(size);
2943 }
6b02494d 2944#endif
e78815a5 2945 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2946 }
c902760f 2947 }
94a6b54f
PB
2948 new_block->length = size;
2949
f471a17e 2950 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2951
7267c094 2952 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2953 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2954 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2955 0xff, size >> TARGET_PAGE_BITS);
2956
6f0437e8
JK
2957 if (kvm_enabled())
2958 kvm_setup_guest_memory(new_block->host, size);
2959
94a6b54f
PB
2960 return new_block->offset;
2961}
e9a1ab19 2962
c5705a77 2963ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2964{
c5705a77 2965 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2966}
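
/* Illustrative sketch, not part of the original file: a RAM block
   normally arrives here via memory_region_init_ram(), and is then
   named for migration with qemu_ram_set_idstr().  The direct calls
   below just show the allocate-then-name protocol; the device, region
   and "example.vram" name are hypothetical. */
static ram_addr_t example_alloc_vram(MemoryRegion *mr, DeviceState *dev)
{
    ram_addr_t offset = qemu_ram_alloc(8 * 1024 * 1024, mr);

    qemu_ram_set_idstr(offset, "example.vram", dev);
    return offset;
}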
2967
1f2e98b6
AW
2968void qemu_ram_free_from_ptr(ram_addr_t addr)
2969{
2970 RAMBlock *block;
2971
2972 QLIST_FOREACH(block, &ram_list.blocks, next) {
2973 if (addr == block->offset) {
2974 QLIST_REMOVE(block, next);
7267c094 2975 g_free(block);
1f2e98b6
AW
2976 return;
2977 }
2978 }
2979}
2980
c227f099 2981void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2982{
04b16653
AW
2983 RAMBlock *block;
2984
2985 QLIST_FOREACH(block, &ram_list.blocks, next) {
2986 if (addr == block->offset) {
2987 QLIST_REMOVE(block, next);
cd19cfa2
HY
2988 if (block->flags & RAM_PREALLOC_MASK) {
2989 ;
2990 } else if (mem_path) {
04b16653
AW
2991#if defined (__linux__) && !defined(TARGET_S390X)
2992 if (block->fd) {
2993 munmap(block->host, block->length);
2994 close(block->fd);
2995 } else {
2996 qemu_vfree(block->host);
2997 }
fd28aa13
JK
2998#else
2999 abort();
04b16653
AW
3000#endif
3001 } else {
3002#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3003 munmap(block->host, block->length);
3004#else
868bb33f 3005 if (xen_enabled()) {
e41d7c69 3006 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3007 } else {
3008 qemu_vfree(block->host);
3009 }
04b16653
AW
3010#endif
3011 }
7267c094 3012 g_free(block);
04b16653
AW
3013 return;
3014 }
3015 }
3016
e9a1ab19
FB
3017}
3018
cd19cfa2
HY
3019#ifndef _WIN32
3020void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3021{
3022 RAMBlock *block;
3023 ram_addr_t offset;
3024 int flags;
3025 void *area, *vaddr;
3026
3027 QLIST_FOREACH(block, &ram_list.blocks, next) {
3028 offset = addr - block->offset;
3029 if (offset < block->length) {
3030 vaddr = block->host + offset;
3031 if (block->flags & RAM_PREALLOC_MASK) {
3032 ;
3033 } else {
3034 flags = MAP_FIXED;
3035 munmap(vaddr, length);
3036 if (mem_path) {
3037#if defined(__linux__) && !defined(TARGET_S390X)
3038 if (block->fd) {
3039#ifdef MAP_POPULATE
3040 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3041 MAP_PRIVATE;
3042#else
3043 flags |= MAP_PRIVATE;
3044#endif
3045 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3046 flags, block->fd, offset);
3047 } else {
3048 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3049 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3050 flags, -1, 0);
3051 }
fd28aa13
JK
3052#else
3053 abort();
cd19cfa2
HY
3054#endif
3055 } else {
3056#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3057 flags |= MAP_SHARED | MAP_ANONYMOUS;
3058 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3059 flags, -1, 0);
3060#else
3061 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3062 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3063 flags, -1, 0);
3064#endif
3065 }
3066 if (area != vaddr) {
f15fbc4b
AP
3067 fprintf(stderr, "Could not remap addr: "
3068 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3069 length, addr);
3070 exit(1);
3071 }
3072 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3073 }
3074 return;
3075 }
3076 }
3077}
3078#endif /* !_WIN32 */
3079
dc828ca1 3080/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3081 With the exception of the softmmu code in this file, this should
3082 only be used for local memory (e.g. video ram) that the device owns,
3083 and knows it isn't going to access beyond the end of the block.
3084
3085 It should not be used for general purpose DMA.
3086 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3087 */
c227f099 3088void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3089{
94a6b54f
PB
3090 RAMBlock *block;
3091
f471a17e
AW
3092 QLIST_FOREACH(block, &ram_list.blocks, next) {
3093 if (addr - block->offset < block->length) {
7d82af38
VP
3094 /* Move this entry to the start of the list. */
3095 if (block != QLIST_FIRST(&ram_list.blocks)) {
3096 QLIST_REMOVE(block, next);
3097 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3098 }
868bb33f 3099 if (xen_enabled()) {
432d268c
JN
3100 /* We need to check if the requested address is in the RAM
3101 * because we don't want to map the entire memory in QEMU.
712c2b41 3102 * In that case just map until the end of the page.
432d268c
JN
3103 */
3104 if (block->offset == 0) {
e41d7c69 3105 return xen_map_cache(addr, 0, 0);
432d268c 3106 } else if (block->host == NULL) {
e41d7c69
JK
3107 block->host =
3108 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3109 }
3110 }
f471a17e
AW
3111 return block->host + (addr - block->offset);
3112 }
94a6b54f 3113 }
f471a17e
AW
3114
3115 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3116 abort();
3117
3118 return NULL;
dc828ca1
PB
3119}
3120
b2e0a138
MT
3121/* Return a host pointer to ram allocated with qemu_ram_alloc.
3122 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3123 */
3124void *qemu_safe_ram_ptr(ram_addr_t addr)
3125{
3126 RAMBlock *block;
3127
3128 QLIST_FOREACH(block, &ram_list.blocks, next) {
3129 if (addr - block->offset < block->length) {
868bb33f 3130 if (xen_enabled()) {
432d268c
JN
3131 /* We need to check if the requested address is in the RAM
3132 * because we don't want to map the entire memory in QEMU.
712c2b41 3133 * In that case just map until the end of the page.
432d268c
JN
3134 */
3135 if (block->offset == 0) {
e41d7c69 3136 return xen_map_cache(addr, 0, 0);
432d268c 3137 } else if (block->host == NULL) {
e41d7c69
JK
3138 block->host =
3139 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3140 }
3141 }
b2e0a138
MT
3142 return block->host + (addr - block->offset);
3143 }
3144 }
3145
3146 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3147 abort();
3148
3149 return NULL;
3150}
3151
38bee5dc
SS
3152/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3153 * but takes a size argument */
8ab934f9 3154void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3155{
8ab934f9
SS
3156 if (*size == 0) {
3157 return NULL;
3158 }
868bb33f 3159 if (xen_enabled()) {
e41d7c69 3160 return xen_map_cache(addr, *size, 1);
868bb33f 3161 } else {
38bee5dc
SS
3162 RAMBlock *block;
3163
3164 QLIST_FOREACH(block, &ram_list.blocks, next) {
3165 if (addr - block->offset < block->length) {
3166 if (addr - block->offset + *size > block->length)
3167 *size = block->length - addr + block->offset;
3168 return block->host + (addr - block->offset);
3169 }
3170 }
3171
3172 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3173 abort();
38bee5dc
SS
3174 }
3175}
3176
050a0ddf
AP
3177void qemu_put_ram_ptr(void *addr)
3178{
3179 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3180}
3181
e890261f 3182int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3183{
94a6b54f
PB
3184 RAMBlock *block;
3185 uint8_t *host = ptr;
3186
868bb33f 3187 if (xen_enabled()) {
e41d7c69 3188 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3189 return 0;
3190 }
3191
f471a17e 3192 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3193 /* This case happens when the block is not mapped. */
3194 if (block->host == NULL) {
3195 continue;
3196 }
f471a17e 3197 if (host - block->host < block->length) {
e890261f
MT
3198 *ram_addr = block->offset + (host - block->host);
3199 return 0;
f471a17e 3200 }
94a6b54f 3201 }
432d268c 3202
e890261f
MT
3203 return -1;
3204}
f471a17e 3205
e890261f
MT
3206/* Some of the softmmu routines need to translate from a host pointer
3207 (typically a TLB entry) back to a ram offset. */
3208ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3209{
3210 ram_addr_t ram_addr;
f471a17e 3211
e890261f
MT
3212 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3213 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3214 abort();
3215 }
3216 return ram_addr;
5579c7f3
PB
3217}
3218
0e0df1e2
AK
3219static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3220 unsigned size)
e18231a3
BS
3221{
3222#ifdef DEBUG_UNASSIGNED
3223 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3224#endif
5b450407 3225#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3226 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3227#endif
3228 return 0;
3229}
3230
0e0df1e2
AK
3231static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3232 uint64_t val, unsigned size)
e18231a3
BS
3233{
3234#ifdef DEBUG_UNASSIGNED
0e0df1e2 3235 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3236#endif
5b450407 3237#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3238 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3239#endif
33417e70
FB
3240}
3241
0e0df1e2
AK
3242static const MemoryRegionOps unassigned_mem_ops = {
3243 .read = unassigned_mem_read,
3244 .write = unassigned_mem_write,
3245 .endianness = DEVICE_NATIVE_ENDIAN,
3246};
e18231a3 3247
0e0df1e2
AK
3248static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3249 unsigned size)
e18231a3 3250{
0e0df1e2 3251 abort();
e18231a3
BS
3252}
3253
0e0df1e2
AK
3254static void error_mem_write(void *opaque, target_phys_addr_t addr,
3255 uint64_t value, unsigned size)
e18231a3 3256{
0e0df1e2 3257 abort();
33417e70
FB
3258}
3259
0e0df1e2
AK
3260static const MemoryRegionOps error_mem_ops = {
3261 .read = error_mem_read,
3262 .write = error_mem_write,
3263 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3264};
3265
0e0df1e2
AK
3266static const MemoryRegionOps rom_mem_ops = {
3267 .read = error_mem_read,
3268 .write = unassigned_mem_write,
3269 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3270};
3271
0e0df1e2
AK
3272static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3273 uint64_t val, unsigned size)
9fa3e853 3274{
3a7d929e 3275 int dirty_flags;
f7c11b53 3276 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3277 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3278#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3279 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3280 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3281#endif
3a7d929e 3282 }
0e0df1e2
AK
3283 switch (size) {
3284 case 1:
3285 stb_p(qemu_get_ram_ptr(ram_addr), val);
3286 break;
3287 case 2:
3288 stw_p(qemu_get_ram_ptr(ram_addr), val);
3289 break;
3290 case 4:
3291 stl_p(qemu_get_ram_ptr(ram_addr), val);
3292 break;
3293 default:
3294 abort();
3a7d929e 3295 }
f23db169 3296 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3297 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3298 /* we remove the notdirty callback only if the code has been
3299 flushed */
3300 if (dirty_flags == 0xff)
2e70f6ef 3301 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3302}
3303
0e0df1e2
AK
3304static const MemoryRegionOps notdirty_mem_ops = {
3305 .read = error_mem_read,
3306 .write = notdirty_mem_write,
3307 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3308};
3309
0f459d16 3310/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3311static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3312{
9349b4f9 3313 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3314 target_ulong pc, cs_base;
3315 TranslationBlock *tb;
0f459d16 3316 target_ulong vaddr;
a1d1bb31 3317 CPUWatchpoint *wp;
06d55cc1 3318 int cpu_flags;
0f459d16 3319
06d55cc1
AL
3320 if (env->watchpoint_hit) {
3321 /* We re-entered the check after replacing the TB. Now raise
3322 * the debug interrupt so that it will trigger after the
3323 * current instruction. */
3324 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3325 return;
3326 }
2e70f6ef 3327 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3328 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3329 if ((vaddr == (wp->vaddr & len_mask) ||
3330 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3331 wp->flags |= BP_WATCHPOINT_HIT;
3332 if (!env->watchpoint_hit) {
3333 env->watchpoint_hit = wp;
3334 tb = tb_find_pc(env->mem_io_pc);
3335 if (!tb) {
3336 cpu_abort(env, "check_watchpoint: could not find TB for "
3337 "pc=%p", (void *)env->mem_io_pc);
3338 }
618ba8e6 3339 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3340 tb_phys_invalidate(tb, -1);
3341 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3342 env->exception_index = EXCP_DEBUG;
488d6577 3343 cpu_loop_exit(env);
6e140f28
AL
3344 } else {
3345 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3346 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3347 cpu_resume_from_signal(env, NULL);
6e140f28 3348 }
06d55cc1 3349 }
6e140f28
AL
3350 } else {
3351 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3352 }
3353 }
3354}
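
/* Worked example for the hit test above (illustration only).  A 4-byte
   watchpoint at 0x1004 is stored with wp->len_mask == ~3.  A 1-byte
   access at 0x1005 hits via the second comparison, because
   (0x1005 & ~3) == 0x1004; a 4-byte access at 0x1008 misses both.
   The first comparison catches a wide access that covers the
   watchpoint's start address, the second a narrow access that lands
   inside the watched range. */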
3355
6658ffb8
PB
3356/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3357 so these check for a hit then pass through to the normal out-of-line
3358 phys routines. */
1ec9b909
AK
3359static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3360 unsigned size)
6658ffb8 3361{
1ec9b909
AK
3362 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3363 switch (size) {
3364 case 1: return ldub_phys(addr);
3365 case 2: return lduw_phys(addr);
3366 case 4: return ldl_phys(addr);
3367 default: abort();
3368 }
6658ffb8
PB
3369}
3370
1ec9b909
AK
3371static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3372 uint64_t val, unsigned size)
6658ffb8 3373{
1ec9b909
AK
3374 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3375 switch (size) {
67364150
MF
3376 case 1:
3377 stb_phys(addr, val);
3378 break;
3379 case 2:
3380 stw_phys(addr, val);
3381 break;
3382 case 4:
3383 stl_phys(addr, val);
3384 break;
1ec9b909
AK
3385 default: abort();
3386 }
6658ffb8
PB
3387}
3388
1ec9b909
AK
3389static const MemoryRegionOps watch_mem_ops = {
3390 .read = watch_mem_read,
3391 .write = watch_mem_write,
3392 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3393};
6658ffb8 3394
70c68e44
AK
3395static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3396 unsigned len)
db7b5426 3397{
70c68e44 3398 subpage_t *mmio = opaque;
f6405247 3399 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3400 MemoryRegionSection *section;
db7b5426
BS
3401#if defined(DEBUG_SUBPAGE)
3402 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3403 mmio, len, addr, idx);
3404#endif
db7b5426 3405
5312bd8b
AK
3406 section = &phys_sections[mmio->sub_section[idx]];
3407 addr += mmio->base;
3408 addr -= section->offset_within_address_space;
3409 addr += section->offset_within_region;
37ec01d4 3410 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3411}
3412
70c68e44
AK
3413static void subpage_write(void *opaque, target_phys_addr_t addr,
3414 uint64_t value, unsigned len)
db7b5426 3415{
70c68e44 3416 subpage_t *mmio = opaque;
f6405247 3417 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3418 MemoryRegionSection *section;
db7b5426 3419#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3420 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3421 " idx %d value %"PRIx64"\n",
f6405247 3422 __func__, mmio, len, addr, idx, value);
db7b5426 3423#endif
f6405247 3424
5312bd8b
AK
3425 section = &phys_sections[mmio->sub_section[idx]];
3426 addr += mmio->base;
3427 addr -= section->offset_within_address_space;
3428 addr += section->offset_within_region;
37ec01d4 3429 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3430}
3431
70c68e44
AK
3432static const MemoryRegionOps subpage_ops = {
3433 .read = subpage_read,
3434 .write = subpage_write,
3435 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3436};
3437
de712f94
AK
3438static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3439 unsigned size)
56384e8b
AF
3440{
3441 ram_addr_t raddr = addr;
3442 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3443 switch (size) {
3444 case 1: return ldub_p(ptr);
3445 case 2: return lduw_p(ptr);
3446 case 4: return ldl_p(ptr);
3447 default: abort();
3448 }
56384e8b
AF
3449}
3450
de712f94
AK
3451static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3452 uint64_t value, unsigned size)
56384e8b
AF
3453{
3454 ram_addr_t raddr = addr;
3455 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3456 switch (size) {
3457 case 1: return stb_p(ptr, value);
3458 case 2: return stw_p(ptr, value);
3459 case 4: return stl_p(ptr, value);
3460 default: abort();
3461 }
56384e8b
AF
3462}
3463
de712f94
AK
3464static const MemoryRegionOps subpage_ram_ops = {
3465 .read = subpage_ram_read,
3466 .write = subpage_ram_write,
3467 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3468};
3469
c227f099 3470static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3471 uint16_t section)
db7b5426
BS
3472{
3473 int idx, eidx;
3474
3475 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3476 return -1;
3477 idx = SUBPAGE_IDX(start);
3478 eidx = SUBPAGE_IDX(end);
3479#if defined(DEBUG_SUBPAGE)
0bf9e31a 3480 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3481 mmio, start, end, idx, eidx, section);
3482#endif
5312bd8b
AK
3483 if (memory_region_is_ram(phys_sections[section].mr)) {
3484 MemoryRegionSection new_section = phys_sections[section];
3485 new_section.mr = &io_mem_subpage_ram;
3486 section = phys_section_add(&new_section);
56384e8b 3487 }
db7b5426 3488 for (; idx <= eidx; idx++) {
5312bd8b 3489 mmio->sub_section[idx] = section;
db7b5426
BS
3490 }
3491
3492 return 0;
3493}
3494
0f0cb164 3495static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3496{
c227f099 3497 subpage_t *mmio;
db7b5426 3498
7267c094 3499 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3500
3501 mmio->base = base;
70c68e44
AK
3502 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3503 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3504 mmio->iomem.subpage = true;
db7b5426 3505#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3506 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3507 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3508#endif
0f0cb164 3509 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3510
3511 return mmio;
3512}
3513
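/* A minimal sketch of the resolution step the subpage handlers above
   perform; the helper name is invented, the arithmetic mirrors
   subpage_read()/subpage_write(). Each slot of sub_section[] can point
   at a different MemoryRegionSection, which is how several small MMIO
   regions share a single guest page. */
static target_phys_addr_t example_subpage_resolve(subpage_t *mmio,
                                                  target_phys_addr_t addr,
                                                  MemoryRegionSection **secp)
{
    MemoryRegionSection *section;

    section = &phys_sections[mmio->sub_section[SUBPAGE_IDX(addr)]];
    *secp = section;
    /* rebase: page offset -> address space offset -> region offset */
    return addr + mmio->base
           - section->offset_within_address_space
           + section->offset_within_region;
}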
5312bd8b
AK
3514static uint16_t dummy_section(MemoryRegion *mr)
3515{
3516 MemoryRegionSection section = {
3517 .mr = mr,
3518 .offset_within_address_space = 0,
3519 .offset_within_region = 0,
3520 .size = UINT64_MAX,
3521 };
3522
3523 return phys_section_add(&section);
3524}
3525
37ec01d4 3526MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3527{
37ec01d4 3528 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3529}
3530
e9179ce1
AK
3531static void io_mem_init(void)
3532{
0e0df1e2 3533 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3534 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3535 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3536 "unassigned", UINT64_MAX);
3537 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3538 "notdirty", UINT64_MAX);
de712f94
AK
3539 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3540 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3541 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3542 "watch", UINT64_MAX);
e9179ce1
AK
3543}
3544
50c1e149
AK
3545static void core_begin(MemoryListener *listener)
3546{
54688b1e 3547 destroy_all_mappings();
5312bd8b 3548 phys_sections_clear();
c19e8800 3549 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3550 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3551 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3552 phys_section_rom = dummy_section(&io_mem_rom);
3553 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3554}
3555
3556static void core_commit(MemoryListener *listener)
3557{
9349b4f9 3558 CPUArchState *env;
117712c3
AK
3559
3560 /* since each CPU stores ram addresses in its TLB cache, we must
3561 reset the modified entries */
3562 /* XXX: slow ! */
3563 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3564 tlb_flush(env, 1);
3565 }
50c1e149
AK
3566}
3567
93632747
AK
3568static void core_region_add(MemoryListener *listener,
3569 MemoryRegionSection *section)
3570{
4855d41a 3571 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3572}
3573
3574static void core_region_del(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
93632747
AK
3577}
3578
50c1e149
AK
3579static void core_region_nop(MemoryListener *listener,
3580 MemoryRegionSection *section)
3581{
54688b1e 3582 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3583}
3584
93632747
AK
3585static void core_log_start(MemoryListener *listener,
3586 MemoryRegionSection *section)
3587{
3588}
3589
3590static void core_log_stop(MemoryListener *listener,
3591 MemoryRegionSection *section)
3592{
3593}
3594
3595static void core_log_sync(MemoryListener *listener,
3596 MemoryRegionSection *section)
3597{
3598}
3599
3600static void core_log_global_start(MemoryListener *listener)
3601{
3602 cpu_physical_memory_set_dirty_tracking(1);
3603}
3604
3605static void core_log_global_stop(MemoryListener *listener)
3606{
3607 cpu_physical_memory_set_dirty_tracking(0);
3608}
3609
3610static void core_eventfd_add(MemoryListener *listener,
3611 MemoryRegionSection *section,
3612 bool match_data, uint64_t data, int fd)
3613{
3614}
3615
3616static void core_eventfd_del(MemoryListener *listener,
3617 MemoryRegionSection *section,
3618 bool match_data, uint64_t data, int fd)
3619{
3620}
3621
50c1e149
AK
3622static void io_begin(MemoryListener *listener)
3623{
3624}
3625
3626static void io_commit(MemoryListener *listener)
3627{
3628}
3629
4855d41a
AK
3630static void io_region_add(MemoryListener *listener,
3631 MemoryRegionSection *section)
3632{
a2d33521
AK
3633 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3634
3635 mrio->mr = section->mr;
3636 mrio->offset = section->offset_within_region;
3637 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3638 section->offset_within_address_space, section->size);
a2d33521 3639 ioport_register(&mrio->iorange);
4855d41a
AK
3640}
3641
3642static void io_region_del(MemoryListener *listener,
3643 MemoryRegionSection *section)
3644{
3645 isa_unassign_ioport(section->offset_within_address_space, section->size);
3646}
3647
50c1e149
AK
3648static void io_region_nop(MemoryListener *listener,
3649 MemoryRegionSection *section)
3650{
3651}
3652
4855d41a
AK
3653static void io_log_start(MemoryListener *listener,
3654 MemoryRegionSection *section)
3655{
3656}
3657
3658static void io_log_stop(MemoryListener *listener,
3659 MemoryRegionSection *section)
3660{
3661}
3662
3663static void io_log_sync(MemoryListener *listener,
3664 MemoryRegionSection *section)
3665{
3666}
3667
3668static void io_log_global_start(MemoryListener *listener)
3669{
3670}
3671
3672static void io_log_global_stop(MemoryListener *listener)
3673{
3674}
3675
3676static void io_eventfd_add(MemoryListener *listener,
3677 MemoryRegionSection *section,
3678 bool match_data, uint64_t data, int fd)
3679{
3680}
3681
3682static void io_eventfd_del(MemoryListener *listener,
3683 MemoryRegionSection *section,
3684 bool match_data, uint64_t data, int fd)
3685{
3686}
3687
93632747 3688static MemoryListener core_memory_listener = {
50c1e149
AK
3689 .begin = core_begin,
3690 .commit = core_commit,
93632747
AK
3691 .region_add = core_region_add,
3692 .region_del = core_region_del,
50c1e149 3693 .region_nop = core_region_nop,
93632747
AK
3694 .log_start = core_log_start,
3695 .log_stop = core_log_stop,
3696 .log_sync = core_log_sync,
3697 .log_global_start = core_log_global_start,
3698 .log_global_stop = core_log_global_stop,
3699 .eventfd_add = core_eventfd_add,
3700 .eventfd_del = core_eventfd_del,
3701 .priority = 0,
3702};
3703
4855d41a 3704static MemoryListener io_memory_listener = {
50c1e149
AK
3705 .begin = io_begin,
3706 .commit = io_commit,
4855d41a
AK
3707 .region_add = io_region_add,
3708 .region_del = io_region_del,
50c1e149 3709 .region_nop = io_region_nop,
4855d41a
AK
3710 .log_start = io_log_start,
3711 .log_stop = io_log_stop,
3712 .log_sync = io_log_sync,
3713 .log_global_start = io_log_global_start,
3714 .log_global_stop = io_log_global_stop,
3715 .eventfd_add = io_eventfd_add,
3716 .eventfd_del = io_eventfd_del,
3717 .priority = 0,
3718};
3719
62152b8a
AK
3720static void memory_map_init(void)
3721{
7267c094 3722 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3723 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3724 set_system_memory_map(system_memory);
309cb471 3725
7267c094 3726 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3727 memory_region_init(system_io, "io", 65536);
3728 set_system_io_map(system_io);
93632747 3729
4855d41a
AK
3730 memory_listener_register(&core_memory_listener, system_memory);
3731 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3732}
3733
3734MemoryRegion *get_system_memory(void)
3735{
3736 return system_memory;
3737}
3738
309cb471
AK
3739MemoryRegion *get_system_io(void)
3740{
3741 return system_io;
3742}
3743
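/* A minimal board-side sketch, assuming this tree's memory API where
   memory_region_init_ram() takes (mr, name, size); the region name and
   size are invented. Adding the subregion is what core_region_add()
   above observes and registers with the physical page map. */
static void example_map_board_ram(void)
{
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    memory_region_init_ram(ram, "example.ram", 64 * 1024 * 1024);
    memory_region_add_subregion(get_system_memory(), 0, ram);
}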
e2eef170
PB
3744#endif /* !defined(CONFIG_USER_ONLY) */
3745
13eb76e0
FB
3746/* physical memory access (slow version, mainly for debug) */
3747#if defined(CONFIG_USER_ONLY)
9349b4f9 3748int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3749 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3750{
3751 int l, flags;
3752 target_ulong page;
53a5960a 3753 void * p;
13eb76e0
FB
3754
3755 while (len > 0) {
3756 page = addr & TARGET_PAGE_MASK;
3757 l = (page + TARGET_PAGE_SIZE) - addr;
3758 if (l > len)
3759 l = len;
3760 flags = page_get_flags(page);
3761 if (!(flags & PAGE_VALID))
a68fe89c 3762 return -1;
13eb76e0
FB
3763 if (is_write) {
3764 if (!(flags & PAGE_WRITE))
a68fe89c 3765 return -1;
579a97f7 3766 /* XXX: this code should not depend on lock_user */
72fb7daa 3767 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3768 return -1;
72fb7daa
AJ
3769 memcpy(p, buf, l);
3770 unlock_user(p, addr, l);
13eb76e0
FB
3771 } else {
3772 if (!(flags & PAGE_READ))
a68fe89c 3773 return -1;
579a97f7 3774 /* XXX: this code should not depend on lock_user */
72fb7daa 3775 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3776 return -1;
72fb7daa 3777 memcpy(buf, p, l);
5b257578 3778 unlock_user(p, addr, 0);
13eb76e0
FB
3779 }
3780 len -= l;
3781 buf += l;
3782 addr += l;
3783 }
a68fe89c 3784 return 0;
13eb76e0 3785}
8df1cd07 3786
13eb76e0 3787#else
c227f099 3788void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3789 int len, int is_write)
3790{
37ec01d4 3791 int l;
13eb76e0
FB
3792 uint8_t *ptr;
3793 uint32_t val;
c227f099 3794 target_phys_addr_t page;
f3705d53 3795 MemoryRegionSection *section;
3b46e624 3796
13eb76e0
FB
3797 while (len > 0) {
3798 page = addr & TARGET_PAGE_MASK;
3799 l = (page + TARGET_PAGE_SIZE) - addr;
3800 if (l > len)
3801 l = len;
06ef3525 3802 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3803
13eb76e0 3804 if (is_write) {
f3705d53 3805 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3806 target_phys_addr_t addr1;
f3705d53 3807 addr1 = section_addr(section, addr);
6a00d601
FB
3808 /* XXX: could force cpu_single_env to NULL to avoid
3809 potential bugs */
6c2934db 3810 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3811 /* 32 bit write access */
c27004ec 3812 val = ldl_p(buf);
37ec01d4 3813 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3814 l = 4;
6c2934db 3815 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3816 /* 16 bit write access */
c27004ec 3817 val = lduw_p(buf);
37ec01d4 3818 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3819 l = 2;
3820 } else {
1c213d19 3821 /* 8 bit write access */
c27004ec 3822 val = ldub_p(buf);
37ec01d4 3823 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3824 l = 1;
3825 }
f3705d53 3826 } else if (!section->readonly) {
8ca5692d 3827 ram_addr_t addr1;
f3705d53
AK
3828 addr1 = memory_region_get_ram_addr(section->mr)
3829 + section_addr(section, addr);
13eb76e0 3830 /* RAM case */
5579c7f3 3831 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3832 memcpy(ptr, buf, l);
3a7d929e
FB
3833 if (!cpu_physical_memory_is_dirty(addr1)) {
3834 /* invalidate code */
3835 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3836 /* set dirty bit */
f7c11b53
YT
3837 cpu_physical_memory_set_dirty_flags(
3838 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3839 }
050a0ddf 3840 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3841 }
3842 } else {
f3705d53 3843 if (!is_ram_rom_romd(section)) {
f1f6e3b8 3844 target_phys_addr_t addr1;
13eb76e0 3845 /* I/O case */
f3705d53 3846 addr1 = section_addr(section, addr);
6c2934db 3847 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3848 /* 32 bit read access */
37ec01d4 3849 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3850 stl_p(buf, val);
13eb76e0 3851 l = 4;
6c2934db 3852 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3853 /* 16 bit read access */
37ec01d4 3854 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3855 stw_p(buf, val);
13eb76e0
FB
3856 l = 2;
3857 } else {
1c213d19 3858 /* 8 bit read access */
37ec01d4 3859 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3860 stb_p(buf, val);
13eb76e0
FB
3861 l = 1;
3862 }
3863 } else {
3864 /* RAM case */
0a1b357f
AP
3865 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3866 + section_addr(section, addr));
f3705d53 3867 memcpy(buf, ptr, l);
050a0ddf 3868 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3869 }
3870 }
3871 len -= l;
3872 buf += l;
3873 addr += l;
3874 }
3875}
8df1cd07 3876
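/* A minimal usage sketch: the cpu_physical_memory_read()/write()
   wrappers (thin helpers around cpu_physical_memory_rw() above) hide
   the is_write flag; the copy helper and buffer size are invented. */
static void example_phys_copy(target_phys_addr_t dst, target_phys_addr_t src,
                              int len)
{
    uint8_t buf[64];

    while (len > 0) {
        int l = len < (int)sizeof(buf) ? len : (int)sizeof(buf);

        cpu_physical_memory_read(src, buf, l);
        cpu_physical_memory_write(dst, buf, l);
        src += l;
        dst += l;
        len -= l;
    }
}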
d0ecd2aa 3877/* used for ROM loading : can write in RAM and ROM */
c227f099 3878void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3879 const uint8_t *buf, int len)
3880{
3881 int l;
3882 uint8_t *ptr;
c227f099 3883 target_phys_addr_t page;
f3705d53 3884 MemoryRegionSection *section;
3b46e624 3885
d0ecd2aa
FB
3886 while (len > 0) {
3887 page = addr & TARGET_PAGE_MASK;
3888 l = (page + TARGET_PAGE_SIZE) - addr;
3889 if (l > len)
3890 l = len;
06ef3525 3891 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3892
f3705d53 3893 if (!is_ram_rom_romd(section)) {
d0ecd2aa
FB
3894 /* do nothing */
3895 } else {
3896 unsigned long addr1;
f3705d53
AK
3897 addr1 = memory_region_get_ram_addr(section->mr)
3898 + section_addr(section, addr);
d0ecd2aa 3899 /* ROM/RAM case */
5579c7f3 3900 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3901 memcpy(ptr, buf, l);
050a0ddf 3902 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3903 }
3904 len -= l;
3905 buf += l;
3906 addr += l;
3907 }
3908}
3909
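/* A minimal usage sketch: firmware loaders go through
   cpu_physical_memory_write_rom() because the regular rw path above
   skips readonly sections. The address and blob are invented. */
static void example_load_firmware(const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, size);
}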
6d16c2f8
AL
3910typedef struct {
3911 void *buffer;
c227f099
AL
3912 target_phys_addr_t addr;
3913 target_phys_addr_t len;
6d16c2f8
AL
3914} BounceBuffer;
3915
3916static BounceBuffer bounce;
3917
ba223c29
AL
3918typedef struct MapClient {
3919 void *opaque;
3920 void (*callback)(void *opaque);
72cf2d4f 3921 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3922} MapClient;
3923
72cf2d4f
BS
3924static QLIST_HEAD(map_client_list, MapClient) map_client_list
3925 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3926
3927void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3928{
7267c094 3929 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3930
3931 client->opaque = opaque;
3932 client->callback = callback;
72cf2d4f 3933 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3934 return client;
3935}
3936
3937void cpu_unregister_map_client(void *_client)
3938{
3939 MapClient *client = (MapClient *)_client;
3940
72cf2d4f 3941 QLIST_REMOVE(client, link);
7267c094 3942 g_free(client);
ba223c29
AL
3943}
3944
3945static void cpu_notify_map_clients(void)
3946{
3947 MapClient *client;
3948
72cf2d4f
BS
3949 while (!QLIST_EMPTY(&map_client_list)) {
3950 client = QLIST_FIRST(&map_client_list);
ba223c29 3951 client->callback(client->opaque);
34d5e948 3952 cpu_unregister_map_client(client);
ba223c29
AL
3953 }
3954}
3955
6d16c2f8
AL
3956/* Map a physical memory region into a host virtual address.
3957 * May map a subset of the requested range, given by and returned in *plen.
3958 * May return NULL if resources needed to perform the mapping are exhausted.
3959 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3960 * Use cpu_register_map_client() to know when retrying the map operation is
3961 * likely to succeed.
6d16c2f8 3962 */
c227f099
AL
3963void *cpu_physical_memory_map(target_phys_addr_t addr,
3964 target_phys_addr_t *plen,
6d16c2f8
AL
3965 int is_write)
3966{
c227f099 3967 target_phys_addr_t len = *plen;
38bee5dc 3968 target_phys_addr_t todo = 0;
6d16c2f8 3969 int l;
c227f099 3970 target_phys_addr_t page;
f3705d53 3971 MemoryRegionSection *section;
f15fbc4b 3972 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3973 ram_addr_t rlen;
3974 void *ret;
6d16c2f8
AL
3975
3976 while (len > 0) {
3977 page = addr & TARGET_PAGE_MASK;
3978 l = (page + TARGET_PAGE_SIZE) - addr;
3979 if (l > len)
3980 l = len;
06ef3525 3981 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3982
f3705d53 3983 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3984 if (todo || bounce.buffer) {
6d16c2f8
AL
3985 break;
3986 }
3987 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3988 bounce.addr = addr;
3989 bounce.len = l;
3990 if (!is_write) {
54f7b4a3 3991 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3992 }
38bee5dc
SS
3993
3994 *plen = l;
3995 return bounce.buffer;
6d16c2f8 3996 }
8ab934f9 3997 if (!todo) {
f3705d53
AK
3998 raddr = memory_region_get_ram_addr(section->mr)
3999 + section_addr(section, addr);
8ab934f9 4000 }
6d16c2f8
AL
4001
4002 len -= l;
4003 addr += l;
38bee5dc 4004 todo += l;
6d16c2f8 4005 }
8ab934f9
SS
4006 rlen = todo;
4007 ret = qemu_ram_ptr_length(raddr, &rlen);
4008 *plen = rlen;
4009 return ret;
6d16c2f8
AL
4010}
4011
4012/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4013 * Will also mark the memory as dirty if is_write == 1. access_len gives
4014 * the amount of memory that was actually read or written by the caller.
4015 */
c227f099
AL
4016void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4017 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4018{
4019 if (buffer != bounce.buffer) {
4020 if (is_write) {
e890261f 4021 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4022 while (access_len) {
4023 unsigned l;
4024 l = TARGET_PAGE_SIZE;
4025 if (l > access_len)
4026 l = access_len;
4027 if (!cpu_physical_memory_is_dirty(addr1)) {
4028 /* invalidate code */
4029 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4030 /* set dirty bit */
f7c11b53
YT
4031 cpu_physical_memory_set_dirty_flags(
4032 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4033 }
4034 addr1 += l;
4035 access_len -= l;
4036 }
4037 }
868bb33f 4038 if (xen_enabled()) {
e41d7c69 4039 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4040 }
6d16c2f8
AL
4041 return;
4042 }
4043 if (is_write) {
4044 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4045 }
f8a83245 4046 qemu_vfree(bounce.buffer);
6d16c2f8 4047 bounce.buffer = NULL;
ba223c29 4048 cpu_notify_map_clients();
6d16c2f8 4049}
d0ecd2aa 4050
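/* A minimal DMA-style sketch of the map/unmap protocol documented
   above; the helper name is invented. A real device would fall back to
   cpu_physical_memory_rw() or queue a cpu_register_map_client()
   callback when NULL comes back (bounce buffer busy). */
static void example_dma_write(target_phys_addr_t addr, const uint8_t *data,
                              target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1);

    if (!host) {
        return; /* resources exhausted; retry later */
    }
    memcpy(host, data, plen); /* plen may be less than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}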
8df1cd07 4051/* warning: addr must be aligned */
1e78bcc1
AG
4052static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4053 enum device_endian endian)
8df1cd07 4054{
8df1cd07
FB
4055 uint8_t *ptr;
4056 uint32_t val;
f3705d53 4057 MemoryRegionSection *section;
8df1cd07 4058
06ef3525 4059 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4060
f3705d53 4061 if (!is_ram_rom_romd(section)) {
8df1cd07 4062 /* I/O case */
f3705d53 4063 addr = section_addr(section, addr);
37ec01d4 4064 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
4065#if defined(TARGET_WORDS_BIGENDIAN)
4066 if (endian == DEVICE_LITTLE_ENDIAN) {
4067 val = bswap32(val);
4068 }
4069#else
4070 if (endian == DEVICE_BIG_ENDIAN) {
4071 val = bswap32(val);
4072 }
4073#endif
8df1cd07
FB
4074 } else {
4075 /* RAM case */
f3705d53 4076 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4077 & TARGET_PAGE_MASK)
f3705d53 4078 + section_addr(section, addr));
1e78bcc1
AG
4079 switch (endian) {
4080 case DEVICE_LITTLE_ENDIAN:
4081 val = ldl_le_p(ptr);
4082 break;
4083 case DEVICE_BIG_ENDIAN:
4084 val = ldl_be_p(ptr);
4085 break;
4086 default:
4087 val = ldl_p(ptr);
4088 break;
4089 }
8df1cd07
FB
4090 }
4091 return val;
4092}
4093
1e78bcc1
AG
4094uint32_t ldl_phys(target_phys_addr_t addr)
4095{
4096 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4097}
4098
4099uint32_t ldl_le_phys(target_phys_addr_t addr)
4100{
4101 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4102}
4103
4104uint32_t ldl_be_phys(target_phys_addr_t addr)
4105{
4106 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4107}
4108
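/* A minimal sketch: device models use the fixed-endian accessors so
   the guest-visible register layout does not depend on
   TARGET_WORDS_BIGENDIAN; the register offset is invented. */
static uint32_t example_read_le_register(target_phys_addr_t mmio_base)
{
    return ldl_le_phys(mmio_base + 0x10);
}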
84b7b8e7 4109/* warning: addr must be aligned */
1e78bcc1
AG
4110static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4111 enum device_endian endian)
84b7b8e7 4112{
84b7b8e7
FB
4113 uint8_t *ptr;
4114 uint64_t val;
f3705d53 4115 MemoryRegionSection *section;
84b7b8e7 4116
06ef3525 4117 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4118
f3705d53 4119 if (!is_ram_rom_romd(section)) {
84b7b8e7 4120 /* I/O case */
f3705d53 4121 addr = section_addr(section, addr);
1e78bcc1
AG
4122
4123 /* XXX This is broken when device endian != cpu endian.
4124 Fix and add "endian" variable check */
84b7b8e7 4125#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4126 val = io_mem_read(section->mr, addr, 4) << 32;
4127 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 4128#else
37ec01d4
AK
4129 val = io_mem_read(section->mr, addr, 4);
4130 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
4131#endif
4132 } else {
4133 /* RAM case */
f3705d53 4134 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4135 & TARGET_PAGE_MASK)
f3705d53 4136 + section_addr(section, addr));
1e78bcc1
AG
4137 switch (endian) {
4138 case DEVICE_LITTLE_ENDIAN:
4139 val = ldq_le_p(ptr);
4140 break;
4141 case DEVICE_BIG_ENDIAN:
4142 val = ldq_be_p(ptr);
4143 break;
4144 default:
4145 val = ldq_p(ptr);
4146 break;
4147 }
84b7b8e7
FB
4148 }
4149 return val;
4150}
4151
1e78bcc1
AG
4152uint64_t ldq_phys(target_phys_addr_t addr)
4153{
4154 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4155}
4156
4157uint64_t ldq_le_phys(target_phys_addr_t addr)
4158{
4159 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4160}
4161
4162uint64_t ldq_be_phys(target_phys_addr_t addr)
4163{
4164 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4165}
4166
aab33094 4167/* XXX: optimize */
c227f099 4168uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4169{
4170 uint8_t val;
4171 cpu_physical_memory_read(addr, &val, 1);
4172 return val;
4173}
4174
733f0b02 4175/* warning: addr must be aligned */
1e78bcc1
AG
4176static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4177 enum device_endian endian)
aab33094 4178{
733f0b02
MT
4179 uint8_t *ptr;
4180 uint64_t val;
f3705d53 4181 MemoryRegionSection *section;
733f0b02 4182
06ef3525 4183 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4184
f3705d53 4185 if (!is_ram_rom_romd(section)) {
733f0b02 4186 /* I/O case */
f3705d53 4187 addr = section_addr(section, addr);
37ec01d4 4188 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
4189#if defined(TARGET_WORDS_BIGENDIAN)
4190 if (endian == DEVICE_LITTLE_ENDIAN) {
4191 val = bswap16(val);
4192 }
4193#else
4194 if (endian == DEVICE_BIG_ENDIAN) {
4195 val = bswap16(val);
4196 }
4197#endif
733f0b02
MT
4198 } else {
4199 /* RAM case */
f3705d53 4200 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4201 & TARGET_PAGE_MASK)
f3705d53 4202 + section_addr(section, addr));
1e78bcc1
AG
4203 switch (endian) {
4204 case DEVICE_LITTLE_ENDIAN:
4205 val = lduw_le_p(ptr);
4206 break;
4207 case DEVICE_BIG_ENDIAN:
4208 val = lduw_be_p(ptr);
4209 break;
4210 default:
4211 val = lduw_p(ptr);
4212 break;
4213 }
733f0b02
MT
4214 }
4215 return val;
aab33094
FB
4216}
4217
1e78bcc1
AG
4218uint32_t lduw_phys(target_phys_addr_t addr)
4219{
4220 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4221}
4222
4223uint32_t lduw_le_phys(target_phys_addr_t addr)
4224{
4225 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4226}
4227
4228uint32_t lduw_be_phys(target_phys_addr_t addr)
4229{
4230 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4231}
4232
8df1cd07
FB
4233/* warning: addr must be aligned. The ram page is not masked as dirty
4234 and the code inside is not invalidated. It is useful if the dirty
4235 bits are used to track modified PTEs */
c227f099 4236void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 4237{
8df1cd07 4238 uint8_t *ptr;
f3705d53 4239 MemoryRegionSection *section;
8df1cd07 4240
06ef3525 4241 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4242
f3705d53 4243 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4244 addr = section_addr(section, addr);
f3705d53 4245 if (memory_region_is_ram(section->mr)) {
37ec01d4 4246 section = &phys_sections[phys_section_rom];
06ef3525 4247 }
37ec01d4 4248 io_mem_write(section->mr, addr, val, 4);
8df1cd07 4249 } else {
f3705d53 4250 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 4251 & TARGET_PAGE_MASK)
f3705d53 4252 + section_addr(section, addr);
5579c7f3 4253 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4254 stl_p(ptr, val);
74576198
AL
4255
4256 if (unlikely(in_migration)) {
4257 if (!cpu_physical_memory_is_dirty(addr1)) {
4258 /* invalidate code */
4259 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4260 /* set dirty bit */
f7c11b53
YT
4261 cpu_physical_memory_set_dirty_flags(
4262 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4263 }
4264 }
8df1cd07
FB
4265 }
4266}
4267
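/* A minimal sketch of the intended user: a softmmu page-table walker
   updating a PTE status bit. The store must not dirty the page or
   invalidate TBs covering it, hence _notdirty. The PTE layout
   (bit 5 = accessed) is invented. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & (1 << 5))) {
        stl_phys_notdirty(pte_addr, pte | (1 << 5));
    }
}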
c227f099 4268void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 4269{
bc98a7ef 4270 uint8_t *ptr;
f3705d53 4271 MemoryRegionSection *section;
bc98a7ef 4272
06ef3525 4273 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4274
f3705d53 4275 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4276 addr = section_addr(section, addr);
f3705d53 4277 if (memory_region_is_ram(section->mr)) {
37ec01d4 4278 section = &phys_sections[phys_section_rom];
06ef3525 4279 }
bc98a7ef 4280#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4281 io_mem_write(section->mr, addr, val >> 32, 4);
4282 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 4283#else
37ec01d4
AK
4284 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4285 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
4286#endif
4287 } else {
f3705d53 4288 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4289 & TARGET_PAGE_MASK)
f3705d53 4290 + section_addr(section, addr));
bc98a7ef
JM
4291 stq_p(ptr, val);
4292 }
4293}
4294
8df1cd07 4295/* warning: addr must be aligned */
1e78bcc1
AG
4296static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4297 enum device_endian endian)
8df1cd07 4298{
8df1cd07 4299 uint8_t *ptr;
f3705d53 4300 MemoryRegionSection *section;
8df1cd07 4301
06ef3525 4302 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4303
f3705d53 4304 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4305 addr = section_addr(section, addr);
f3705d53 4306 if (memory_region_is_ram(section->mr)) {
37ec01d4 4307 section = &phys_sections[phys_section_rom];
06ef3525 4308 }
1e78bcc1
AG
4309#if defined(TARGET_WORDS_BIGENDIAN)
4310 if (endian == DEVICE_LITTLE_ENDIAN) {
4311 val = bswap32(val);
4312 }
4313#else
4314 if (endian == DEVICE_BIG_ENDIAN) {
4315 val = bswap32(val);
4316 }
4317#endif
37ec01d4 4318 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4319 } else {
4320 unsigned long addr1;
f3705d53
AK
4321 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4322 + section_addr(section, addr);
8df1cd07 4323 /* RAM case */
5579c7f3 4324 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4325 switch (endian) {
4326 case DEVICE_LITTLE_ENDIAN:
4327 stl_le_p(ptr, val);
4328 break;
4329 case DEVICE_BIG_ENDIAN:
4330 stl_be_p(ptr, val);
4331 break;
4332 default:
4333 stl_p(ptr, val);
4334 break;
4335 }
3a7d929e
FB
4336 if (!cpu_physical_memory_is_dirty(addr1)) {
4337 /* invalidate code */
4338 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4339 /* set dirty bit */
f7c11b53
YT
4340 cpu_physical_memory_set_dirty_flags(addr1,
4341 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4342 }
8df1cd07
FB
4343 }
4344}
4345
1e78bcc1
AG
4346void stl_phys(target_phys_addr_t addr, uint32_t val)
4347{
4348 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4349}
4350
4351void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4352{
4353 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4354}
4355
4356void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4357{
4358 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4359}
4360
aab33094 4361/* XXX: optimize */
c227f099 4362void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4363{
4364 uint8_t v = val;
4365 cpu_physical_memory_write(addr, &v, 1);
4366}
4367
733f0b02 4368/* warning: addr must be aligned */
1e78bcc1
AG
4369static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4370 enum device_endian endian)
aab33094 4371{
733f0b02 4372 uint8_t *ptr;
f3705d53 4373 MemoryRegionSection *section;
733f0b02 4374
06ef3525 4375 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4376
f3705d53 4377 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4378 addr = section_addr(section, addr);
f3705d53 4379 if (memory_region_is_ram(section->mr)) {
37ec01d4 4380 section = &phys_sections[phys_section_rom];
06ef3525 4381 }
1e78bcc1
AG
4382#if defined(TARGET_WORDS_BIGENDIAN)
4383 if (endian == DEVICE_LITTLE_ENDIAN) {
4384 val = bswap16(val);
4385 }
4386#else
4387 if (endian == DEVICE_BIG_ENDIAN) {
4388 val = bswap16(val);
4389 }
4390#endif
37ec01d4 4391 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4392 } else {
4393 unsigned long addr1;
f3705d53
AK
4394 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4395 + section_addr(section, addr);
733f0b02
MT
4396 /* RAM case */
4397 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4398 switch (endian) {
4399 case DEVICE_LITTLE_ENDIAN:
4400 stw_le_p(ptr, val);
4401 break;
4402 case DEVICE_BIG_ENDIAN:
4403 stw_be_p(ptr, val);
4404 break;
4405 default:
4406 stw_p(ptr, val);
4407 break;
4408 }
733f0b02
MT
4409 if (!cpu_physical_memory_is_dirty(addr1)) {
4410 /* invalidate code */
4411 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4412 /* set dirty bit */
4413 cpu_physical_memory_set_dirty_flags(addr1,
4414 (0xff & ~CODE_DIRTY_FLAG));
4415 }
4416 }
aab33094
FB
4417}
4418
1e78bcc1
AG
4419void stw_phys(target_phys_addr_t addr, uint32_t val)
4420{
4421 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4422}
4423
4424void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4425{
4426 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4427}
4428
4429void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4430{
4431 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4432}
4433
aab33094 4434/* XXX: optimize */
c227f099 4435void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4436{
4437 val = tswap64(val);
71d2b725 4438 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4439}
4440
1e78bcc1
AG
4441void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4442{
4443 val = cpu_to_le64(val);
4444 cpu_physical_memory_write(addr, &val, 8);
4445}
4446
4447void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4448{
4449 val = cpu_to_be64(val);
4450 cpu_physical_memory_write(addr, &val, 8);
4451}
4452
5e2972fd 4453/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4454int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4455 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4456{
4457 int l;
c227f099 4458 target_phys_addr_t phys_addr;
9b3c35e0 4459 target_ulong page;
13eb76e0
FB
4460
4461 while (len > 0) {
4462 page = addr & TARGET_PAGE_MASK;
4463 phys_addr = cpu_get_phys_page_debug(env, page);
4464 /* if no physical page mapped, return an error */
4465 if (phys_addr == -1)
4466 return -1;
4467 l = (page + TARGET_PAGE_SIZE) - addr;
4468 if (l > len)
4469 l = len;
5e2972fd 4470 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4471 if (is_write)
4472 cpu_physical_memory_write_rom(phys_addr, buf, l);
4473 else
5e2972fd 4474 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4475 len -= l;
4476 buf += l;
4477 addr += l;
4478 }
4479 return 0;
4480}
a68fe89c 4481#endif
13eb76e0 4482
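/* A minimal debugger-stub sketch using cpu_memory_rw_debug(): save the
   original byte, then patch in a breakpoint instruction (0xcc is the
   x86 int3, used here only as an example; the helper is invented). */
static int example_insert_sw_breakpoint(CPUArchState *env, target_ulong pc,
                                        uint8_t *saved_insn)
{
    static const uint8_t bp_insn[] = { 0xcc };

    if (cpu_memory_rw_debug(env, pc, saved_insn, sizeof(bp_insn), 0) ||
        cpu_memory_rw_debug(env, pc, (uint8_t *)bp_insn,
                            sizeof(bp_insn), 1)) {
        return -1;
    }
    return 0;
}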
2e70f6ef
PB
4483/* in deterministic execution mode, instructions doing device I/Os
4484 must be at the end of the TB */
20503968 4485void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
2e70f6ef
PB
4486{
4487 TranslationBlock *tb;
4488 uint32_t n, cflags;
4489 target_ulong pc, cs_base;
4490 uint64_t flags;
4491
20503968 4492 tb = tb_find_pc(retaddr);
2e70f6ef
PB
4493 if (!tb) {
4494 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
20503968 4495 (void *)retaddr);
2e70f6ef
PB
4496 }
4497 n = env->icount_decr.u16.low + tb->icount;
20503968 4498 cpu_restore_state(tb, env, retaddr);
2e70f6ef 4499 /* Calculate how many instructions had been executed before the fault
bf20dc07 4500 occurred. */
2e70f6ef
PB
4501 n = n - env->icount_decr.u16.low;
4502 /* Generate a new TB ending on the I/O insn. */
4503 n++;
4504 /* On MIPS and SH, delay slot instructions can only be restarted if
4505 they were already the first instruction in the TB. If this is not
bf20dc07 4506 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4507 branch. */
4508#if defined(TARGET_MIPS)
4509 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4510 env->active_tc.PC -= 4;
4511 env->icount_decr.u16.low++;
4512 env->hflags &= ~MIPS_HFLAG_BMASK;
4513 }
4514#elif defined(TARGET_SH4)
4515 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4516 && n > 1) {
4517 env->pc -= 2;
4518 env->icount_decr.u16.low++;
4519 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4520 }
4521#endif
4522 /* This should never happen. */
4523 if (n > CF_COUNT_MASK)
4524 cpu_abort(env, "TB too big during recompile");
4525
4526 cflags = n | CF_LAST_IO;
4527 pc = tb->pc;
4528 cs_base = tb->cs_base;
4529 flags = tb->flags;
4530 tb_phys_invalidate(tb, -1);
4531 /* FIXME: In theory this could raise an exception. In practice
4532 we have already translated the block once so it's probably ok. */
4533 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4534 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4535 the first in the TB) then we end up generating a whole new TB and
4536 repeating the fault, which is horribly inefficient.
4537 Better would be to execute just this insn uncached, or generate a
4538 second new TB. */
4539 cpu_resume_from_signal(env, NULL);
4540}
4541
b3755a91
PB
4542#if !defined(CONFIG_USER_ONLY)
4543
055403b2 4544void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4545{
4546 int i, target_code_size, max_target_code_size;
4547 int direct_jmp_count, direct_jmp2_count, cross_page;
4548 TranslationBlock *tb;
3b46e624 4549
e3db7226
FB
4550 target_code_size = 0;
4551 max_target_code_size = 0;
4552 cross_page = 0;
4553 direct_jmp_count = 0;
4554 direct_jmp2_count = 0;
4555 for(i = 0; i < nb_tbs; i++) {
4556 tb = &tbs[i];
4557 target_code_size += tb->size;
4558 if (tb->size > max_target_code_size)
4559 max_target_code_size = tb->size;
4560 if (tb->page_addr[1] != -1)
4561 cross_page++;
4562 if (tb->tb_next_offset[0] != 0xffff) {
4563 direct_jmp_count++;
4564 if (tb->tb_next_offset[1] != 0xffff) {
4565 direct_jmp2_count++;
4566 }
4567 }
4568 }
4569 /* XXX: avoid using doubles ? */
57fec1fe 4570 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4571 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4572 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4573 cpu_fprintf(f, "TB count %d/%d\n",
4574 nb_tbs, code_gen_max_blocks);
5fafdf24 4575 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4576 nb_tbs ? target_code_size / nb_tbs : 0,
4577 max_target_code_size);
055403b2 4578 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4579 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4580 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4581 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4582 cross_page,
e3db7226
FB
4583 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4584 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4585 direct_jmp_count,
e3db7226
FB
4586 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4587 direct_jmp2_count,
4588 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4589 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4590 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4591 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4592 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4593 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4594}
4595
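/* A minimal sketch: the monitor's "info jit" command is the usual
   caller; plain fprintf also satisfies fprintf_function, so the same
   dump can go to stderr. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}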
d39e8222
AK
4596/* NOTE: this function can trigger an exception */
4597/* NOTE2: the returned address is not exactly the physical address: it
4598 is a ram_addr_t offset relative to the start of guest RAM */
9349b4f9 4599tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
d39e8222
AK
4600{
4601 int mmu_idx, page_index, pd;
4602 void *p;
37ec01d4 4603 MemoryRegion *mr;
d39e8222
AK
4604
4605 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4606 mmu_idx = cpu_mmu_index(env1);
4607 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4608 (addr & TARGET_PAGE_MASK))) {
e141ab52
BS
4609#ifdef CONFIG_TCG_PASS_AREG0
4610 cpu_ldub_code(env1, addr);
4611#else
d39e8222 4612 ldub_code(addr);
e141ab52 4613#endif
d39e8222 4614 }
ce5d64c2 4615 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
37ec01d4
AK
4616 mr = iotlb_to_region(pd);
4617 if (mr != &io_mem_ram && mr != &io_mem_rom
32b08980
AK
4618 && mr != &io_mem_notdirty && !mr->rom_device
4619 && mr != &io_mem_watch) {
d39e8222
AK
4620#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4621 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4622#else
4623 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4624#endif
4625 }
4626 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4627 return qemu_ram_addr_from_host_nofail(p);
4628}
4629
82afa586
BH
4630/*
4631 * A helper function for the _utterly broken_ virtio device model to find out if
4632 * it's running on a big endian machine. Don't do this at home kids!
4633 */
4634bool virtio_is_big_endian(void);
4635bool virtio_is_big_endian(void)
4636{
4637#if defined(TARGET_WORDS_BIGENDIAN)
4638 return true;
4639#else
4640 return false;
4641#endif
4642}
4643
61382a50 4644#define MMUSUFFIX _cmmu
3917149d 4645#undef GETPC
20503968 4646#define GETPC() ((uintptr_t)0)
61382a50 4647#define env cpu_single_env
b769d8fe 4648#define SOFTMMU_CODE_ACCESS
61382a50
FB
4649
4650#define SHIFT 0
4651#include "softmmu_template.h"
4652
4653#define SHIFT 1
4654#include "softmmu_template.h"
4655
4656#define SHIFT 2
4657#include "softmmu_template.h"
4658
4659#define SHIFT 3
4660#include "softmmu_template.h"
4661
4662#undef env
4663
4664#endif