[mirror_qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
36#include "memory.h"
37#include "exec-memory.h"
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
67
68/* make various TB consistency checks */
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
89#if defined(__arm__) || defined(__sparc_v9__)
 90/* The prologue must be reachable with a direct jump. ARM and Sparc64
 91 have limited branch ranges (possibly also PPC), so place it in a
 92 section close to the code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
0e0df1e2 121MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 122static MemoryRegion io_mem_subpage_ram;
0e0df1e2 123
e2eef170 124#endif
9fa3e853 125
9349b4f9 126CPUArchState *first_cpu;
127/* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
9349b4f9 129DEFINE_TLS(CPUArchState *,cpu_single_env);
2e70f6ef 130/* 0 = Do not count executed instructions.
bf20dc07 131 1 = Precise instruction counting.
132 2 = Adaptive rate instruction counting. */
133int use_icount = 0;
6a00d601 134
54936004 135typedef struct PageDesc {
92e873b9 136 /* list of TBs intersecting this ram page */
fd6ce8f6 137 TranslationBlock *first_tb;
 138 /* in order to optimize self-modifying code handling, we count the number
 139 of code write accesses to a given page and switch to a bitmap past a threshold */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142#if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144#endif
145} PageDesc;
146
41c1b1c9 147/* In system mode we want L1_MAP to be based on ram offsets,
148 while in user mode we want it to be based on virtual addresses. */
149#if !defined(CONFIG_USER_ONLY)
150#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 154#endif
bedb69ea 155#else
5cd2c5b6 156# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 157#endif
54936004 158
159/* Size of the L2 (and L3, etc) page tables. */
160#define L2_BITS 10
161#define L2_SIZE (1 << L2_BITS)
162
163#define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
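/* Illustration, assuming a 52-bit physical address space and 12-bit target
   pages: P_L2_LEVELS = ((52 - 12 - 1) / 10) + 1 = 4, i.e. four 10-bit levels
   are enough to cover the 40 address bits above the page offset. */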
165
5cd2c5b6 166/* The bits remaining after N lower levels of page tables. */
167#define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169
170#if V_L1_BITS_REM < 4
171#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172#else
173#define V_L1_BITS V_L1_BITS_REM
174#endif
175
176#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177
178#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
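/* Illustration, assuming L1_MAP_ADDR_SPACE_BITS = 64 and TARGET_PAGE_BITS = 12:
   V_L1_BITS_REM = (64 - 12) % 10 = 2, which is below 4, so V_L1_BITS becomes
   2 + 10 = 12, V_L1_SIZE = 1 << 12 = 4096 l1_map entries, and
   V_L1_SHIFT = 64 - 12 - 12 = 40. */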
179
83fb7adf 180unsigned long qemu_real_host_page_size;
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
54936004 183
184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
54936004 187
e2eef170 188#if !defined(CONFIG_USER_ONLY)
189typedef struct PhysPageEntry PhysPageEntry;
190
191static MemoryRegionSection *phys_sections;
192static unsigned phys_sections_nb, phys_sections_nb_alloc;
193static uint16_t phys_section_unassigned;
194static uint16_t phys_section_notdirty;
195static uint16_t phys_section_rom;
196static uint16_t phys_section_watch;
5312bd8b 197
4346ae3e 198struct PhysPageEntry {
199 uint16_t is_leaf : 1;
200 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
201 uint16_t ptr : 15;
202};
203
204/* Simple allocator for PhysPageEntry nodes */
205static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
206static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
207
07f07b31 208#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
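/* Note: with ptr being a 15-bit field, PHYS_MAP_NODE_NIL works out to
   0xffff >> 1 = 0x7fff, the largest representable node index, so it cannot
   clash with a real node as long as fewer than 0x7fff nodes are allocated. */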
d6f2ea22 209
5cd2c5b6 210/* This is a multi-level map on the physical address space.
06ef3525 211 The bottom level has pointers to MemoryRegionSections. */
07f07b31 212static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
6d9a1304 213
e2eef170 214static void io_mem_init(void);
62152b8a 215static void memory_map_init(void);
e2eef170 216
1ec9b909 217static MemoryRegion io_mem_watch;
6658ffb8 218#endif
33417e70 219
34865134 220/* log support */
221#ifdef WIN32
222static const char *logfilename = "qemu.log";
223#else
d9b630fd 224static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 225#endif
226FILE *logfile;
227int loglevel;
e735b91c 228static int log_append = 0;
34865134 229
e3db7226 230/* statistics */
b3755a91 231#if !defined(CONFIG_USER_ONLY)
e3db7226 232static int tlb_flush_count;
b3755a91 233#endif
234static int tb_flush_count;
235static int tb_phys_invalidate_count;
236
237#ifdef _WIN32
238static void map_exec(void *addr, long size)
239{
240 DWORD old_protect;
241 VirtualProtect(addr, size,
242 PAGE_EXECUTE_READWRITE, &old_protect);
243
244}
245#else
246static void map_exec(void *addr, long size)
247{
4369415f 248 unsigned long start, end, page_size;
7cb69cae 249
4369415f 250 page_size = getpagesize();
7cb69cae 251 start = (unsigned long)addr;
4369415f 252 start &= ~(page_size - 1);
253
254 end = (unsigned long)addr + size;
255 end += page_size - 1;
256 end &= ~(page_size - 1);
257
258 mprotect((void *)start, end - start,
259 PROT_READ | PROT_WRITE | PROT_EXEC);
260}
261#endif
262
b346ff46 263static void page_init(void)
54936004 264{
83fb7adf 265 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 266 TARGET_PAGE_SIZE */
267#ifdef _WIN32
268 {
269 SYSTEM_INFO system_info;
270
271 GetSystemInfo(&system_info);
272 qemu_real_host_page_size = system_info.dwPageSize;
273 }
274#else
275 qemu_real_host_page_size = getpagesize();
276#endif
277 if (qemu_host_page_size == 0)
278 qemu_host_page_size = qemu_real_host_page_size;
279 if (qemu_host_page_size < TARGET_PAGE_SIZE)
280 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 281 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 282
2e9a5713 283#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 284 {
285#ifdef HAVE_KINFO_GETVMMAP
286 struct kinfo_vmentry *freep;
287 int i, cnt;
288
289 freep = kinfo_getvmmap(getpid(), &cnt);
290 if (freep) {
291 mmap_lock();
292 for (i = 0; i < cnt; i++) {
293 unsigned long startaddr, endaddr;
294
295 startaddr = freep[i].kve_start;
296 endaddr = freep[i].kve_end;
297 if (h2g_valid(startaddr)) {
298 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
299
300 if (h2g_valid(endaddr)) {
301 endaddr = h2g(endaddr);
fd436907 302 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
303 } else {
304#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
305 endaddr = ~0ul;
fd436907 306 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
307#endif
308 }
309 }
310 }
311 free(freep);
312 mmap_unlock();
313 }
314#else
50a9569b 315 FILE *f;
50a9569b 316
0776590d 317 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 318
fd436907 319 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 320 if (f) {
321 mmap_lock();
322
50a9569b 323 do {
324 unsigned long startaddr, endaddr;
325 int n;
326
327 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
328
329 if (n == 2 && h2g_valid(startaddr)) {
330 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
331
332 if (h2g_valid(endaddr)) {
333 endaddr = h2g(endaddr);
334 } else {
335 endaddr = ~0ul;
336 }
337 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
338 }
339 } while (!feof(f));
5cd2c5b6 340
50a9569b 341 fclose(f);
5cd2c5b6 342 mmap_unlock();
50a9569b 343 }
f01576f1 344#endif
345 }
346#endif
347}
348
41c1b1c9 349static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 350{
351 PageDesc *pd;
352 void **lp;
353 int i;
354
5cd2c5b6 355#if defined(CONFIG_USER_ONLY)
7267c094 356 /* We can't use g_malloc because it may recurse into a locked mutex. */
357# define ALLOC(P, SIZE) \
358 do { \
359 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
360 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
361 } while (0)
362#else
363# define ALLOC(P, SIZE) \
7267c094 364 do { P = g_malloc0(SIZE); } while (0)
17e2377a 365#endif
434929bf 366
367 /* Level 1. Always allocated. */
368 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
369
370 /* Level 2..N-1. */
371 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
372 void **p = *lp;
373
374 if (p == NULL) {
375 if (!alloc) {
376 return NULL;
377 }
378 ALLOC(p, sizeof(void *) * L2_SIZE);
379 *lp = p;
17e2377a 380 }
381
382 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
383 }
384
385 pd = *lp;
386 if (pd == NULL) {
387 if (!alloc) {
388 return NULL;
389 }
390 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
391 *lp = pd;
54936004 392 }
393
394#undef ALLOC
395
396 return pd + (index & (L2_SIZE - 1));
397}
398
41c1b1c9 399static inline PageDesc *page_find(tb_page_addr_t index)
54936004 400{
5cd2c5b6 401 return page_find_alloc(index, 0);
402}
403
6d9a1304 404#if !defined(CONFIG_USER_ONLY)
d6f2ea22 405
f7bf5461 406static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 407{
f7bf5461 408 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
409 typedef PhysPageEntry Node[L2_SIZE];
410 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
411 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
412 phys_map_nodes_nb + nodes);
d6f2ea22
AK
413 phys_map_nodes = g_renew(Node, phys_map_nodes,
414 phys_map_nodes_nb_alloc);
415 }
f7bf5461
AK
416}
417
418static uint16_t phys_map_node_alloc(void)
419{
420 unsigned i;
421 uint16_t ret;
422
423 ret = phys_map_nodes_nb++;
424 assert(ret != PHYS_MAP_NODE_NIL);
425 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 426 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 427 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 428 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 429 }
f7bf5461 430 return ret;
d6f2ea22
AK
431}
432
433static void phys_map_nodes_reset(void)
434{
435 phys_map_nodes_nb = 0;
436}
437
92e873b9 438
2999097b
AK
439static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
440 target_phys_addr_t *nb, uint16_t leaf,
441 int level)
f7bf5461
AK
442{
443 PhysPageEntry *p;
444 int i;
07f07b31 445 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
108c49b8 446
07f07b31 447 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
448 lp->ptr = phys_map_node_alloc();
449 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
450 if (level == 0) {
451 for (i = 0; i < L2_SIZE; i++) {
07f07b31 452 p[i].is_leaf = 1;
c19e8800 453 p[i].ptr = phys_section_unassigned;
4346ae3e 454 }
67c4d23c 455 }
f7bf5461 456 } else {
c19e8800 457 p = phys_map_nodes[lp->ptr];
92e873b9 458 }
2999097b 459 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 460
2999097b 461 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
462 if ((*index & (step - 1)) == 0 && *nb >= step) {
463 lp->is_leaf = true;
c19e8800 464 lp->ptr = leaf;
07f07b31
AK
465 *index += step;
466 *nb -= step;
2999097b
AK
467 } else {
468 phys_page_set_level(lp, index, nb, leaf, level - 1);
469 }
470 ++lp;
f7bf5461
AK
471 }
472}
473
2999097b
AK
474static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
475 uint16_t leaf)
f7bf5461 476{
2999097b 477 /* Wildly overreserve - it doesn't matter much. */
07f07b31 478 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 479
2999097b 480 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
481}
482
f3705d53 483static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
92e873b9 484{
31ab2b4a
AK
485 PhysPageEntry lp = phys_map;
486 PhysPageEntry *p;
487 int i;
31ab2b4a 488 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 489
07f07b31 490 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 491 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
492 goto not_found;
493 }
c19e8800 494 p = phys_map_nodes[lp.ptr];
31ab2b4a 495 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 496 }
31ab2b4a 497
c19e8800 498 s_index = lp.ptr;
31ab2b4a 499not_found:
f3705d53
AK
500 return &phys_sections[s_index];
501}
502
503static target_phys_addr_t section_addr(MemoryRegionSection *section,
504 target_phys_addr_t addr)
505{
506 addr -= section->offset_within_address_space;
507 addr += section->offset_within_region;
508 return addr;
92e873b9
FB
509}
510
c227f099 511static void tlb_protect_code(ram_addr_t ram_addr);
9349b4f9 512static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 513 target_ulong vaddr);
c8a706fe
PB
514#define mmap_lock() do { } while(0)
515#define mmap_unlock() do { } while(0)
9fa3e853 516#endif
fd6ce8f6 517
4369415f
FB
518#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
519
520#if defined(CONFIG_USER_ONLY)
ccbb4d44 521/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
522 user mode. It will change when a dedicated libc will be used */
523#define USE_STATIC_CODE_GEN_BUFFER
524#endif
525
526#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
527static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
528 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
529#endif
530
8fcd3692 531static void code_gen_alloc(unsigned long tb_size)
26a5f13b 532{
4369415f
FB
533#ifdef USE_STATIC_CODE_GEN_BUFFER
534 code_gen_buffer = static_code_gen_buffer;
535 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
536 map_exec(code_gen_buffer, code_gen_buffer_size);
537#else
26a5f13b
FB
538 code_gen_buffer_size = tb_size;
539 if (code_gen_buffer_size == 0) {
4369415f 540#if defined(CONFIG_USER_ONLY)
4369415f
FB
541 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
542#else
ccbb4d44 543 /* XXX: needs adjustments */
94a6b54f 544 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 545#endif
26a5f13b
FB
546 }
547 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
548 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
549 /* The code gen buffer location may have constraints depending on
550 the host cpu and OS */
551#if defined(__linux__)
552 {
553 int flags;
141ac468
BS
554 void *start = NULL;
555
26a5f13b
FB
556 flags = MAP_PRIVATE | MAP_ANONYMOUS;
557#if defined(__x86_64__)
558 flags |= MAP_32BIT;
559 /* Cannot map more than that */
560 if (code_gen_buffer_size > (800 * 1024 * 1024))
561 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
562#elif defined(__sparc_v9__)
563 // Map the buffer below 2G, so we can use direct calls and branches
564 flags |= MAP_FIXED;
565 start = (void *) 0x60000000UL;
566 if (code_gen_buffer_size > (512 * 1024 * 1024))
567 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 568#elif defined(__arm__)
5c84bd90 569 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
570 if (code_gen_buffer_size > 16 * 1024 * 1024)
571 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
572#elif defined(__s390x__)
573 /* Map the buffer so that we can use direct calls and branches. */
574 /* We have a +- 4GB range on the branches; leave some slop. */
575 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
576 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
577 }
578 start = (void *)0x90000000UL;
26a5f13b 579#endif
141ac468
BS
580 code_gen_buffer = mmap(start, code_gen_buffer_size,
581 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
582 flags, -1, 0);
583 if (code_gen_buffer == MAP_FAILED) {
584 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
585 exit(1);
586 }
587 }
cbb608a5 588#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
589 || defined(__DragonFly__) || defined(__OpenBSD__) \
590 || defined(__NetBSD__)
06e67a82
AL
591 {
592 int flags;
593 void *addr = NULL;
594 flags = MAP_PRIVATE | MAP_ANONYMOUS;
595#if defined(__x86_64__)
596 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
597 * 0x40000000 is free */
598 flags |= MAP_FIXED;
599 addr = (void *)0x40000000;
600 /* Cannot map more than that */
601 if (code_gen_buffer_size > (800 * 1024 * 1024))
602 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
603#elif defined(__sparc_v9__)
604 // Map the buffer below 2G, so we can use direct calls and branches
605 flags |= MAP_FIXED;
606 addr = (void *) 0x60000000UL;
607 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
608 code_gen_buffer_size = (512 * 1024 * 1024);
609 }
06e67a82
AL
610#endif
611 code_gen_buffer = mmap(addr, code_gen_buffer_size,
612 PROT_WRITE | PROT_READ | PROT_EXEC,
613 flags, -1, 0);
614 if (code_gen_buffer == MAP_FAILED) {
615 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
616 exit(1);
617 }
618 }
26a5f13b 619#else
7267c094 620 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
621 map_exec(code_gen_buffer, code_gen_buffer_size);
622#endif
4369415f 623#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 624 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
625 code_gen_buffer_max_size = code_gen_buffer_size -
626 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 627 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 628 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
629}
630
631/* Must be called before using the QEMU cpus. 'tb_size' is the size
632 (in bytes) allocated to the translation buffer. Zero means default
633 size. */
d5ab9713 634void tcg_exec_init(unsigned long tb_size)
26a5f13b 635{
26a5f13b
FB
636 cpu_gen_init();
637 code_gen_alloc(tb_size);
638 code_gen_ptr = code_gen_buffer;
813da627 639 tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
4369415f 640 page_init();
9002ec79
RH
641#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
642 /* There's no guest base to take into account, so go ahead and
643 initialize the prologue now. */
644 tcg_prologue_init(&tcg_ctx);
645#endif
26a5f13b
FB
646}
647
d5ab9713
JK
648bool tcg_enabled(void)
649{
650 return code_gen_buffer != NULL;
651}
652
653void cpu_exec_init_all(void)
654{
655#if !defined(CONFIG_USER_ONLY)
656 memory_map_init();
657 io_mem_init();
658#endif
659}
660
9656f324
PB
661#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
662
e59fb374 663static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7 664{
9349b4f9 665 CPUArchState *env = opaque;
9656f324 666
3098dba0
AJ
667 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
668 version_id is increased. */
669 env->interrupt_request &= ~0x01;
9656f324
PB
670 tlb_flush(env, 1);
671
672 return 0;
673}
e7f4eff7
JQ
674
675static const VMStateDescription vmstate_cpu_common = {
676 .name = "cpu_common",
677 .version_id = 1,
678 .minimum_version_id = 1,
679 .minimum_version_id_old = 1,
e7f4eff7
JQ
680 .post_load = cpu_common_post_load,
681 .fields = (VMStateField []) {
9349b4f9
AF
682 VMSTATE_UINT32(halted, CPUArchState),
683 VMSTATE_UINT32(interrupt_request, CPUArchState),
e7f4eff7
JQ
684 VMSTATE_END_OF_LIST()
685 }
686};
9656f324
PB
687#endif
688
9349b4f9 689CPUArchState *qemu_get_cpu(int cpu)
950f1472 690{
9349b4f9 691 CPUArchState *env = first_cpu;
950f1472
GC
692
693 while (env) {
694 if (env->cpu_index == cpu)
695 break;
696 env = env->next_cpu;
697 }
698
699 return env;
700}
701
9349b4f9 702void cpu_exec_init(CPUArchState *env)
fd6ce8f6 703{
9349b4f9 704 CPUArchState **penv;
6a00d601
FB
705 int cpu_index;
706
c2764719
PB
707#if defined(CONFIG_USER_ONLY)
708 cpu_list_lock();
709#endif
6a00d601
FB
710 env->next_cpu = NULL;
711 penv = &first_cpu;
712 cpu_index = 0;
713 while (*penv != NULL) {
1e9fa730 714 penv = &(*penv)->next_cpu;
6a00d601
FB
715 cpu_index++;
716 }
717 env->cpu_index = cpu_index;
268a362c 718 env->numa_node = 0;
72cf2d4f
BS
719 QTAILQ_INIT(&env->breakpoints);
720 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
721#ifndef CONFIG_USER_ONLY
722 env->thread_id = qemu_get_thread_id();
723#endif
6a00d601 724 *penv = env;
c2764719
PB
725#if defined(CONFIG_USER_ONLY)
726 cpu_list_unlock();
727#endif
b3c7724c 728#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
729 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
730 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
731 cpu_save, cpu_load, env);
732#endif
fd6ce8f6
FB
733}
734
d1a1eb74
TG
735/* Allocate a new translation block. Flush the translation buffer if
736 too many translation blocks or too much generated code. */
737static TranslationBlock *tb_alloc(target_ulong pc)
738{
739 TranslationBlock *tb;
740
741 if (nb_tbs >= code_gen_max_blocks ||
742 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
743 return NULL;
744 tb = &tbs[nb_tbs++];
745 tb->pc = pc;
746 tb->cflags = 0;
747 return tb;
748}
749
750void tb_free(TranslationBlock *tb)
751{
 752 /* In practice this is mostly used for single-use temporary TBs.
 753 Ignore the hard cases and just back up if this TB happens to
 754 be the last one generated. */
755 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
756 code_gen_ptr = tb->tc_ptr;
757 nb_tbs--;
758 }
759}
760
9fa3e853
FB
761static inline void invalidate_page_bitmap(PageDesc *p)
762{
763 if (p->code_bitmap) {
7267c094 764 g_free(p->code_bitmap);
9fa3e853
FB
765 p->code_bitmap = NULL;
766 }
767 p->code_write_count = 0;
768}
769
5cd2c5b6
RH
770/* Set to NULL all the 'first_tb' fields in all PageDescs. */
771
772static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 773{
5cd2c5b6 774 int i;
fd6ce8f6 775
5cd2c5b6
RH
776 if (*lp == NULL) {
777 return;
778 }
779 if (level == 0) {
780 PageDesc *pd = *lp;
7296abac 781 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
782 pd[i].first_tb = NULL;
783 invalidate_page_bitmap(pd + i);
fd6ce8f6 784 }
5cd2c5b6
RH
785 } else {
786 void **pp = *lp;
7296abac 787 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
788 page_flush_tb_1 (level - 1, pp + i);
789 }
790 }
791}
792
793static void page_flush_tb(void)
794{
795 int i;
796 for (i = 0; i < V_L1_SIZE; i++) {
797 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
798 }
799}
800
801/* flush all the translation blocks */
d4e8164f 802/* XXX: tb_flush is currently not thread safe */
9349b4f9 803void tb_flush(CPUArchState *env1)
fd6ce8f6 804{
9349b4f9 805 CPUArchState *env;
0124311e 806#if defined(DEBUG_FLUSH)
ab3d1727
BS
807 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
808 (unsigned long)(code_gen_ptr - code_gen_buffer),
809 nb_tbs, nb_tbs > 0 ?
810 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 811#endif
26a5f13b 812 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
813 cpu_abort(env1, "Internal error: code buffer overflow\n");
814
fd6ce8f6 815 nb_tbs = 0;
3b46e624 816
6a00d601
FB
817 for(env = first_cpu; env != NULL; env = env->next_cpu) {
818 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
819 }
9fa3e853 820
8a8a608f 821 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 822 page_flush_tb();
9fa3e853 823
fd6ce8f6 824 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
825 /* XXX: flush processor icache at this point if cache flush is
826 expensive */
e3db7226 827 tb_flush_count++;
fd6ce8f6
FB
828}
829
830#ifdef DEBUG_TB_CHECK
831
bc98a7ef 832static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
833{
834 TranslationBlock *tb;
835 int i;
836 address &= TARGET_PAGE_MASK;
99773bd4
PB
837 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
838 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
839 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
840 address >= tb->pc + tb->size)) {
0bf9e31a
BS
841 printf("ERROR invalidate: address=" TARGET_FMT_lx
842 " PC=%08lx size=%04x\n",
99773bd4 843 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
844 }
845 }
846 }
847}
848
849/* verify that all the pages have correct rights for code */
850static void tb_page_check(void)
851{
852 TranslationBlock *tb;
853 int i, flags1, flags2;
3b46e624 854
99773bd4
PB
855 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
856 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
857 flags1 = page_get_flags(tb->pc);
858 flags2 = page_get_flags(tb->pc + tb->size - 1);
859 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
860 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 861 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
862 }
863 }
864 }
865}
866
867#endif
868
869/* invalidate one TB */
870static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
871 int next_offset)
872{
873 TranslationBlock *tb1;
874 for(;;) {
875 tb1 = *ptb;
876 if (tb1 == tb) {
877 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
878 break;
879 }
880 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
881 }
882}
883
9fa3e853
FB
884static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
885{
886 TranslationBlock *tb1;
887 unsigned int n1;
888
889 for(;;) {
890 tb1 = *ptb;
891 n1 = (long)tb1 & 3;
892 tb1 = (TranslationBlock *)((long)tb1 & ~3);
893 if (tb1 == tb) {
894 *ptb = tb1->page_next[n1];
895 break;
896 }
897 ptb = &tb1->page_next[n1];
898 }
899}
900
d4e8164f
FB
901static inline void tb_jmp_remove(TranslationBlock *tb, int n)
902{
903 TranslationBlock *tb1, **ptb;
904 unsigned int n1;
905
906 ptb = &tb->jmp_next[n];
907 tb1 = *ptb;
908 if (tb1) {
909 /* find tb(n) in circular list */
910 for(;;) {
911 tb1 = *ptb;
912 n1 = (long)tb1 & 3;
913 tb1 = (TranslationBlock *)((long)tb1 & ~3);
914 if (n1 == n && tb1 == tb)
915 break;
916 if (n1 == 2) {
917 ptb = &tb1->jmp_first;
918 } else {
919 ptb = &tb1->jmp_next[n1];
920 }
921 }
922 /* now we can suppress tb(n) from the list */
923 *ptb = tb->jmp_next[n];
924
925 tb->jmp_next[n] = NULL;
926 }
927}
928
929/* reset the jump entry 'n' of a TB so that it is not chained to
930 another TB */
931static inline void tb_reset_jump(TranslationBlock *tb, int n)
932{
933 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
934}
935
41c1b1c9 936void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 937{
9349b4f9 938 CPUArchState *env;
8a40a180 939 PageDesc *p;
d4e8164f 940 unsigned int h, n1;
41c1b1c9 941 tb_page_addr_t phys_pc;
8a40a180 942 TranslationBlock *tb1, *tb2;
3b46e624 943
8a40a180
FB
944 /* remove the TB from the hash list */
945 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
946 h = tb_phys_hash_func(phys_pc);
5fafdf24 947 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
948 offsetof(TranslationBlock, phys_hash_next));
949
950 /* remove the TB from the page list */
951 if (tb->page_addr[0] != page_addr) {
952 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
953 tb_page_remove(&p->first_tb, tb);
954 invalidate_page_bitmap(p);
955 }
956 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
957 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
958 tb_page_remove(&p->first_tb, tb);
959 invalidate_page_bitmap(p);
960 }
961
36bdbe54 962 tb_invalidated_flag = 1;
59817ccb 963
fd6ce8f6 964 /* remove the TB from the hash list */
8a40a180 965 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
966 for(env = first_cpu; env != NULL; env = env->next_cpu) {
967 if (env->tb_jmp_cache[h] == tb)
968 env->tb_jmp_cache[h] = NULL;
969 }
d4e8164f
FB
970
971 /* suppress this TB from the two jump lists */
972 tb_jmp_remove(tb, 0);
973 tb_jmp_remove(tb, 1);
974
975 /* suppress any remaining jumps to this TB */
976 tb1 = tb->jmp_first;
977 for(;;) {
978 n1 = (long)tb1 & 3;
979 if (n1 == 2)
980 break;
981 tb1 = (TranslationBlock *)((long)tb1 & ~3);
982 tb2 = tb1->jmp_next[n1];
983 tb_reset_jump(tb1, n1);
984 tb1->jmp_next[n1] = NULL;
985 tb1 = tb2;
986 }
987 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 988
e3db7226 989 tb_phys_invalidate_count++;
9fa3e853
FB
990}
991
992static inline void set_bits(uint8_t *tab, int start, int len)
993{
994 int end, mask, end1;
995
996 end = start + len;
997 tab += start >> 3;
998 mask = 0xff << (start & 7);
999 if ((start & ~7) == (end & ~7)) {
1000 if (start < end) {
1001 mask &= ~(0xff << (end & 7));
1002 *tab |= mask;
1003 }
1004 } else {
1005 *tab++ |= mask;
1006 start = (start + 8) & ~7;
1007 end1 = end & ~7;
1008 while (start < end1) {
1009 *tab++ = 0xff;
1010 start += 8;
1011 }
1012 if (start < end) {
1013 mask = ~(0xff << (end & 7));
1014 *tab |= mask;
1015 }
1016 }
1017}
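/* Illustration: set_bits(tab, 3, 6) marks bit positions 3..8, i.e. it ORs
   0xf8 into tab[0] (bits 3-7) and 0x01 into tab[1] (bit 0), taking the
   byte-crossing branch of the function above. */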
1018
1019static void build_page_bitmap(PageDesc *p)
1020{
1021 int n, tb_start, tb_end;
1022 TranslationBlock *tb;
3b46e624 1023
7267c094 1024 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
1025
1026 tb = p->first_tb;
1027 while (tb != NULL) {
1028 n = (long)tb & 3;
1029 tb = (TranslationBlock *)((long)tb & ~3);
1030 /* NOTE: this is subtle as a TB may span two physical pages */
1031 if (n == 0) {
1032 /* NOTE: tb_end may be after the end of the page, but
1033 it is not a problem */
1034 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1035 tb_end = tb_start + tb->size;
1036 if (tb_end > TARGET_PAGE_SIZE)
1037 tb_end = TARGET_PAGE_SIZE;
1038 } else {
1039 tb_start = 0;
1040 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1041 }
1042 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1043 tb = tb->page_next[n];
1044 }
1045}
1046
9349b4f9 1047TranslationBlock *tb_gen_code(CPUArchState *env,
2e70f6ef
PB
1048 target_ulong pc, target_ulong cs_base,
1049 int flags, int cflags)
d720b93d
FB
1050{
1051 TranslationBlock *tb;
1052 uint8_t *tc_ptr;
41c1b1c9
PB
1053 tb_page_addr_t phys_pc, phys_page2;
1054 target_ulong virt_page2;
d720b93d
FB
1055 int code_gen_size;
1056
41c1b1c9 1057 phys_pc = get_page_addr_code(env, pc);
c27004ec 1058 tb = tb_alloc(pc);
d720b93d
FB
1059 if (!tb) {
1060 /* flush must be done */
1061 tb_flush(env);
1062 /* cannot fail at this point */
c27004ec 1063 tb = tb_alloc(pc);
2e70f6ef
PB
1064 /* Don't forget to invalidate previous TB info. */
1065 tb_invalidated_flag = 1;
d720b93d
FB
1066 }
1067 tc_ptr = code_gen_ptr;
1068 tb->tc_ptr = tc_ptr;
1069 tb->cs_base = cs_base;
1070 tb->flags = flags;
1071 tb->cflags = cflags;
d07bde88 1072 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1073 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1074
d720b93d 1075 /* check next page if needed */
c27004ec 1076 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1077 phys_page2 = -1;
c27004ec 1078 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1079 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1080 }
41c1b1c9 1081 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1082 return tb;
d720b93d 1083}
3b46e624 1084
9fa3e853
FB
1085/* invalidate all TBs which intersect with the target physical page
1086 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1087 the same physical page. 'is_cpu_write_access' should be true if called
1088 from a real cpu write access: the virtual CPU will exit the current
1089 TB if code is modified inside this TB. */
41c1b1c9 1090void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1091 int is_cpu_write_access)
1092{
6b917547 1093 TranslationBlock *tb, *tb_next, *saved_tb;
9349b4f9 1094 CPUArchState *env = cpu_single_env;
41c1b1c9 1095 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1096 PageDesc *p;
1097 int n;
1098#ifdef TARGET_HAS_PRECISE_SMC
1099 int current_tb_not_found = is_cpu_write_access;
1100 TranslationBlock *current_tb = NULL;
1101 int current_tb_modified = 0;
1102 target_ulong current_pc = 0;
1103 target_ulong current_cs_base = 0;
1104 int current_flags = 0;
1105#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1106
1107 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1108 if (!p)
9fa3e853 1109 return;
5fafdf24 1110 if (!p->code_bitmap &&
d720b93d
FB
1111 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1112 is_cpu_write_access) {
9fa3e853
FB
1113 /* build code bitmap */
1114 build_page_bitmap(p);
1115 }
1116
1117 /* we remove all the TBs in the range [start, end[ */
1118 /* XXX: see if in some cases it could be faster to invalidate all the code */
1119 tb = p->first_tb;
1120 while (tb != NULL) {
1121 n = (long)tb & 3;
1122 tb = (TranslationBlock *)((long)tb & ~3);
1123 tb_next = tb->page_next[n];
1124 /* NOTE: this is subtle as a TB may span two physical pages */
1125 if (n == 0) {
1126 /* NOTE: tb_end may be after the end of the page, but
1127 it is not a problem */
1128 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1129 tb_end = tb_start + tb->size;
1130 } else {
1131 tb_start = tb->page_addr[1];
1132 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1133 }
1134 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1135#ifdef TARGET_HAS_PRECISE_SMC
1136 if (current_tb_not_found) {
1137 current_tb_not_found = 0;
1138 current_tb = NULL;
2e70f6ef 1139 if (env->mem_io_pc) {
d720b93d 1140 /* now we have a real cpu fault */
2e70f6ef 1141 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1142 }
1143 }
1144 if (current_tb == tb &&
2e70f6ef 1145 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1146 /* If we are modifying the current TB, we must stop
1147 its execution. We could be more precise by checking
1148 that the modification is after the current PC, but it
1149 would require a specialized function to partially
1150 restore the CPU state */
3b46e624 1151
d720b93d 1152 current_tb_modified = 1;
618ba8e6 1153 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1154 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1155 &current_flags);
d720b93d
FB
1156 }
1157#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1158 /* we need to do that to handle the case where a signal
1159 occurs while doing tb_phys_invalidate() */
1160 saved_tb = NULL;
1161 if (env) {
1162 saved_tb = env->current_tb;
1163 env->current_tb = NULL;
1164 }
9fa3e853 1165 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1166 if (env) {
1167 env->current_tb = saved_tb;
1168 if (env->interrupt_request && env->current_tb)
1169 cpu_interrupt(env, env->interrupt_request);
1170 }
9fa3e853
FB
1171 }
1172 tb = tb_next;
1173 }
1174#if !defined(CONFIG_USER_ONLY)
1175 /* if no code remaining, no need to continue to use slow writes */
1176 if (!p->first_tb) {
1177 invalidate_page_bitmap(p);
d720b93d 1178 if (is_cpu_write_access) {
2e70f6ef 1179 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1180 }
1181 }
1182#endif
1183#ifdef TARGET_HAS_PRECISE_SMC
1184 if (current_tb_modified) {
1185 /* we generate a block containing just the instruction
1186 modifying the memory. It will ensure that it cannot modify
1187 itself */
ea1c1802 1188 env->current_tb = NULL;
2e70f6ef 1189 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1190 cpu_resume_from_signal(env, NULL);
9fa3e853 1191 }
fd6ce8f6 1192#endif
9fa3e853 1193}
fd6ce8f6 1194
9fa3e853 1195/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1196static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1197{
1198 PageDesc *p;
1199 int offset, b;
59817ccb 1200#if 0
a4193c8a 1201 if (1) {
93fcfe39
AL
1202 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1203 cpu_single_env->mem_io_vaddr, len,
1204 cpu_single_env->eip,
1205 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1206 }
1207#endif
9fa3e853 1208 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1209 if (!p)
9fa3e853
FB
1210 return;
1211 if (p->code_bitmap) {
1212 offset = start & ~TARGET_PAGE_MASK;
1213 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1214 if (b & ((1 << len) - 1))
1215 goto do_invalidate;
1216 } else {
1217 do_invalidate:
d720b93d 1218 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1219 }
1220}
1221
9fa3e853 1222#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1223static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1224 unsigned long pc, void *puc)
9fa3e853 1225{
6b917547 1226 TranslationBlock *tb;
9fa3e853 1227 PageDesc *p;
6b917547 1228 int n;
d720b93d 1229#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1230 TranslationBlock *current_tb = NULL;
9349b4f9 1231 CPUArchState *env = cpu_single_env;
6b917547
AL
1232 int current_tb_modified = 0;
1233 target_ulong current_pc = 0;
1234 target_ulong current_cs_base = 0;
1235 int current_flags = 0;
d720b93d 1236#endif
9fa3e853
FB
1237
1238 addr &= TARGET_PAGE_MASK;
1239 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1240 if (!p)
9fa3e853
FB
1241 return;
1242 tb = p->first_tb;
d720b93d
FB
1243#ifdef TARGET_HAS_PRECISE_SMC
1244 if (tb && pc != 0) {
1245 current_tb = tb_find_pc(pc);
1246 }
1247#endif
9fa3e853
FB
1248 while (tb != NULL) {
1249 n = (long)tb & 3;
1250 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1251#ifdef TARGET_HAS_PRECISE_SMC
1252 if (current_tb == tb &&
2e70f6ef 1253 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1254 /* If we are modifying the current TB, we must stop
1255 its execution. We could be more precise by checking
1256 that the modification is after the current PC, but it
1257 would require a specialized function to partially
1258 restore the CPU state */
3b46e624 1259
d720b93d 1260 current_tb_modified = 1;
618ba8e6 1261 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1262 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1263 &current_flags);
d720b93d
FB
1264 }
1265#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1266 tb_phys_invalidate(tb, addr);
1267 tb = tb->page_next[n];
1268 }
fd6ce8f6 1269 p->first_tb = NULL;
d720b93d
FB
1270#ifdef TARGET_HAS_PRECISE_SMC
1271 if (current_tb_modified) {
1272 /* we generate a block containing just the instruction
1273 modifying the memory. It will ensure that it cannot modify
1274 itself */
ea1c1802 1275 env->current_tb = NULL;
2e70f6ef 1276 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1277 cpu_resume_from_signal(env, puc);
1278 }
1279#endif
fd6ce8f6 1280}
9fa3e853 1281#endif
fd6ce8f6
FB
1282
1283/* add the tb in the target page and protect it if necessary */
5fafdf24 1284static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1285 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1286{
1287 PageDesc *p;
4429ab44
JQ
1288#ifndef CONFIG_USER_ONLY
1289 bool page_already_protected;
1290#endif
9fa3e853
FB
1291
1292 tb->page_addr[n] = page_addr;
5cd2c5b6 1293 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1294 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1295#ifndef CONFIG_USER_ONLY
1296 page_already_protected = p->first_tb != NULL;
1297#endif
9fa3e853
FB
1298 p->first_tb = (TranslationBlock *)((long)tb | n);
1299 invalidate_page_bitmap(p);
fd6ce8f6 1300
107db443 1301#if defined(TARGET_HAS_SMC) || 1
d720b93d 1302
9fa3e853 1303#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1304 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1305 target_ulong addr;
1306 PageDesc *p2;
9fa3e853
FB
1307 int prot;
1308
fd6ce8f6
FB
1309 /* force the host page as non writable (writes will have a
1310 page fault + mprotect overhead) */
53a5960a 1311 page_addr &= qemu_host_page_mask;
fd6ce8f6 1312 prot = 0;
53a5960a
PB
1313 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1314 addr += TARGET_PAGE_SIZE) {
1315
1316 p2 = page_find (addr >> TARGET_PAGE_BITS);
1317 if (!p2)
1318 continue;
1319 prot |= p2->flags;
1320 p2->flags &= ~PAGE_WRITE;
53a5960a 1321 }
5fafdf24 1322 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1323 (prot & PAGE_BITS) & ~PAGE_WRITE);
1324#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1325 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1326 page_addr);
fd6ce8f6 1327#endif
fd6ce8f6 1328 }
9fa3e853
FB
1329#else
1330 /* if some code is already present, then the pages are already
1331 protected. So we handle the case where only the first TB is
1332 allocated in a physical page */
4429ab44 1333 if (!page_already_protected) {
6a00d601 1334 tlb_protect_code(page_addr);
9fa3e853
FB
1335 }
1336#endif
d720b93d
FB
1337
1338#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1339}
1340
9fa3e853
FB
1341/* add a new TB and link it to the physical page tables. phys_page2 is
1342 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1343void tb_link_page(TranslationBlock *tb,
1344 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1345{
9fa3e853
FB
1346 unsigned int h;
1347 TranslationBlock **ptb;
1348
c8a706fe
PB
1349 /* Grab the mmap lock to stop another thread invalidating this TB
1350 before we are done. */
1351 mmap_lock();
9fa3e853
FB
1352 /* add in the physical hash table */
1353 h = tb_phys_hash_func(phys_pc);
1354 ptb = &tb_phys_hash[h];
1355 tb->phys_hash_next = *ptb;
1356 *ptb = tb;
fd6ce8f6
FB
1357
1358 /* add in the page list */
9fa3e853
FB
1359 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1360 if (phys_page2 != -1)
1361 tb_alloc_page(tb, 1, phys_page2);
1362 else
1363 tb->page_addr[1] = -1;
9fa3e853 1364
d4e8164f
FB
1365 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1366 tb->jmp_next[0] = NULL;
1367 tb->jmp_next[1] = NULL;
1368
1369 /* init original jump addresses */
1370 if (tb->tb_next_offset[0] != 0xffff)
1371 tb_reset_jump(tb, 0);
1372 if (tb->tb_next_offset[1] != 0xffff)
1373 tb_reset_jump(tb, 1);
8a40a180
FB
1374
1375#ifdef DEBUG_TB_CHECK
1376 tb_page_check();
1377#endif
c8a706fe 1378 mmap_unlock();
fd6ce8f6
FB
1379}
1380
9fa3e853
FB
1381/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1382 tb[1].tc_ptr. Return NULL if not found */
1383TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1384{
9fa3e853
FB
1385 int m_min, m_max, m;
1386 unsigned long v;
1387 TranslationBlock *tb;
a513fe19
FB
1388
1389 if (nb_tbs <= 0)
1390 return NULL;
1391 if (tc_ptr < (unsigned long)code_gen_buffer ||
1392 tc_ptr >= (unsigned long)code_gen_ptr)
1393 return NULL;
1394 /* binary search (cf Knuth) */
1395 m_min = 0;
1396 m_max = nb_tbs - 1;
1397 while (m_min <= m_max) {
1398 m = (m_min + m_max) >> 1;
1399 tb = &tbs[m];
1400 v = (unsigned long)tb->tc_ptr;
1401 if (v == tc_ptr)
1402 return tb;
1403 else if (tc_ptr < v) {
1404 m_max = m - 1;
1405 } else {
1406 m_min = m + 1;
1407 }
5fafdf24 1408 }
a513fe19
FB
1409 return &tbs[m_max];
1410}
7501267e 1411
ea041c0e
FB
1412static void tb_reset_jump_recursive(TranslationBlock *tb);
1413
1414static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1415{
1416 TranslationBlock *tb1, *tb_next, **ptb;
1417 unsigned int n1;
1418
1419 tb1 = tb->jmp_next[n];
1420 if (tb1 != NULL) {
1421 /* find head of list */
1422 for(;;) {
1423 n1 = (long)tb1 & 3;
1424 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1425 if (n1 == 2)
1426 break;
1427 tb1 = tb1->jmp_next[n1];
1428 }
 1429 /* we are now sure that tb jumps to tb1 */
1430 tb_next = tb1;
1431
1432 /* remove tb from the jmp_first list */
1433 ptb = &tb_next->jmp_first;
1434 for(;;) {
1435 tb1 = *ptb;
1436 n1 = (long)tb1 & 3;
1437 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1438 if (n1 == n && tb1 == tb)
1439 break;
1440 ptb = &tb1->jmp_next[n1];
1441 }
1442 *ptb = tb->jmp_next[n];
1443 tb->jmp_next[n] = NULL;
3b46e624 1444
ea041c0e
FB
1445 /* suppress the jump to next tb in generated code */
1446 tb_reset_jump(tb, n);
1447
0124311e 1448 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1449 tb_reset_jump_recursive(tb_next);
1450 }
1451}
1452
1453static void tb_reset_jump_recursive(TranslationBlock *tb)
1454{
1455 tb_reset_jump_recursive2(tb, 0);
1456 tb_reset_jump_recursive2(tb, 1);
1457}
1458
1fddef4b 1459#if defined(TARGET_HAS_ICE)
94df27fd 1460#if defined(CONFIG_USER_ONLY)
9349b4f9 1461static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
94df27fd
PB
1462{
1463 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1464}
1465#else
9349b4f9 1466static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
d720b93d 1467{
c227f099 1468 target_phys_addr_t addr;
c227f099 1469 ram_addr_t ram_addr;
f3705d53 1470 MemoryRegionSection *section;
d720b93d 1471
c2f07f81 1472 addr = cpu_get_phys_page_debug(env, pc);
06ef3525 1473 section = phys_page_find(addr >> TARGET_PAGE_BITS);
f3705d53
AK
1474 if (!(memory_region_is_ram(section->mr)
1475 || (section->mr->rom_device && section->mr->readable))) {
06ef3525
AK
1476 return;
1477 }
f3705d53
AK
1478 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1479 + section_addr(section, addr);
706cd4b5 1480 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1481}
c27004ec 1482#endif
94df27fd 1483#endif /* TARGET_HAS_ICE */
d720b93d 1484
c527ee8f 1485#if defined(CONFIG_USER_ONLY)
9349b4f9 1486void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
c527ee8f
PB
1487
1488{
1489}
1490
9349b4f9 1491int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
c527ee8f
PB
1492 int flags, CPUWatchpoint **watchpoint)
1493{
1494 return -ENOSYS;
1495}
1496#else
6658ffb8 1497/* Add a watchpoint. */
9349b4f9 1498int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1499 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1500{
b4051334 1501 target_ulong len_mask = ~(len - 1);
c0ce998e 1502 CPUWatchpoint *wp;
6658ffb8 1503
b4051334 1504 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
1505 if ((len & (len - 1)) || (addr & ~len_mask) ||
1506 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
1507 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1508 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1509 return -EINVAL;
1510 }
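    /* Illustration: for len = 4, len_mask is ~3; (len & (len - 1)) == 0
       confirms the power-of-2 length and (addr & ~len_mask) == (addr & 3)
       rejects any address that is not 4-byte aligned. */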
7267c094 1511 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1512
1513 wp->vaddr = addr;
b4051334 1514 wp->len_mask = len_mask;
a1d1bb31
AL
1515 wp->flags = flags;
1516
2dc9f411 1517 /* keep all GDB-injected watchpoints in front */
c0ce998e 1518 if (flags & BP_GDB)
72cf2d4f 1519 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1520 else
72cf2d4f 1521 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1522
6658ffb8 1523 tlb_flush_page(env, addr);
a1d1bb31
AL
1524
1525 if (watchpoint)
1526 *watchpoint = wp;
1527 return 0;
6658ffb8
PB
1528}
1529
a1d1bb31 1530/* Remove a specific watchpoint. */
9349b4f9 1531int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
a1d1bb31 1532 int flags)
6658ffb8 1533{
b4051334 1534 target_ulong len_mask = ~(len - 1);
a1d1bb31 1535 CPUWatchpoint *wp;
6658ffb8 1536
72cf2d4f 1537 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1538 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1539 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1540 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1541 return 0;
1542 }
1543 }
a1d1bb31 1544 return -ENOENT;
6658ffb8
PB
1545}
1546
a1d1bb31 1547/* Remove a specific watchpoint by reference. */
9349b4f9 1548void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
a1d1bb31 1549{
72cf2d4f 1550 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1551
a1d1bb31
AL
1552 tlb_flush_page(env, watchpoint->vaddr);
1553
7267c094 1554 g_free(watchpoint);
a1d1bb31
AL
1555}
1556
1557/* Remove all matching watchpoints. */
9349b4f9 1558void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31 1559{
c0ce998e 1560 CPUWatchpoint *wp, *next;
a1d1bb31 1561
72cf2d4f 1562 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1563 if (wp->flags & mask)
1564 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1565 }
7d03f82f 1566}
c527ee8f 1567#endif
7d03f82f 1568
a1d1bb31 1569/* Add a breakpoint. */
9349b4f9 1570int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
a1d1bb31 1571 CPUBreakpoint **breakpoint)
4c3a88a2 1572{
1fddef4b 1573#if defined(TARGET_HAS_ICE)
c0ce998e 1574 CPUBreakpoint *bp;
3b46e624 1575
7267c094 1576 bp = g_malloc(sizeof(*bp));
4c3a88a2 1577
a1d1bb31
AL
1578 bp->pc = pc;
1579 bp->flags = flags;
1580
2dc9f411 1581 /* keep all GDB-injected breakpoints in front */
c0ce998e 1582 if (flags & BP_GDB)
72cf2d4f 1583 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1584 else
72cf2d4f 1585 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1586
d720b93d 1587 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1588
1589 if (breakpoint)
1590 *breakpoint = bp;
4c3a88a2
FB
1591 return 0;
1592#else
a1d1bb31 1593 return -ENOSYS;
4c3a88a2
FB
1594#endif
1595}
1596
a1d1bb31 1597/* Remove a specific breakpoint. */
9349b4f9 1598int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
a1d1bb31 1599{
7d03f82f 1600#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1601 CPUBreakpoint *bp;
1602
72cf2d4f 1603 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1604 if (bp->pc == pc && bp->flags == flags) {
1605 cpu_breakpoint_remove_by_ref(env, bp);
1606 return 0;
1607 }
7d03f82f 1608 }
a1d1bb31
AL
1609 return -ENOENT;
1610#else
1611 return -ENOSYS;
7d03f82f
EI
1612#endif
1613}
1614
a1d1bb31 1615/* Remove a specific breakpoint by reference. */
9349b4f9 1616void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1617{
1fddef4b 1618#if defined(TARGET_HAS_ICE)
72cf2d4f 1619 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1620
a1d1bb31
AL
1621 breakpoint_invalidate(env, breakpoint->pc);
1622
7267c094 1623 g_free(breakpoint);
a1d1bb31
AL
1624#endif
1625}
1626
1627/* Remove all matching breakpoints. */
9349b4f9 1628void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
a1d1bb31
AL
1629{
1630#if defined(TARGET_HAS_ICE)
c0ce998e 1631 CPUBreakpoint *bp, *next;
a1d1bb31 1632
72cf2d4f 1633 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1634 if (bp->flags & mask)
1635 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1636 }
4c3a88a2
FB
1637#endif
1638}
1639
c33a346e
FB
1640/* enable or disable single step mode. EXCP_DEBUG is returned by the
1641 CPU loop after each instruction */
9349b4f9 1642void cpu_single_step(CPUArchState *env, int enabled)
c33a346e 1643{
1fddef4b 1644#if defined(TARGET_HAS_ICE)
c33a346e
FB
1645 if (env->singlestep_enabled != enabled) {
1646 env->singlestep_enabled = enabled;
e22a25c9
AL
1647 if (kvm_enabled())
1648 kvm_update_guest_debug(env, 0);
1649 else {
ccbb4d44 1650 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1651 /* XXX: only flush what is necessary */
1652 tb_flush(env);
1653 }
c33a346e
FB
1654 }
1655#endif
1656}
1657
34865134
FB
 1658/* enable or disable low level logging */
1659void cpu_set_log(int log_flags)
1660{
1661 loglevel = log_flags;
1662 if (loglevel && !logfile) {
11fcfab4 1663 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1664 if (!logfile) {
1665 perror(logfilename);
1666 _exit(1);
1667 }
9fa3e853
FB
1668#if !defined(CONFIG_SOFTMMU)
1669 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1670 {
b55266b5 1671 static char logfile_buf[4096];
9fa3e853
FB
1672 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1673 }
daf767b1
SW
1674#elif defined(_WIN32)
1675 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1676 setvbuf(logfile, NULL, _IONBF, 0);
1677#else
34865134 1678 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1679#endif
e735b91c
PB
1680 log_append = 1;
1681 }
1682 if (!loglevel && logfile) {
1683 fclose(logfile);
1684 logfile = NULL;
34865134
FB
1685 }
1686}
1687
1688void cpu_set_log_filename(const char *filename)
1689{
1690 logfilename = strdup(filename);
e735b91c
PB
1691 if (logfile) {
1692 fclose(logfile);
1693 logfile = NULL;
1694 }
1695 cpu_set_log(loglevel);
34865134 1696}
c33a346e 1697
9349b4f9 1698static void cpu_unlink_tb(CPUArchState *env)
ea041c0e 1699{
3098dba0
AJ
1700 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1701 problem and hope the cpu will stop of its own accord. For userspace
1702 emulation this often isn't actually as bad as it sounds. Often
1703 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1704 TranslationBlock *tb;
c227f099 1705 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1706
cab1b4bd 1707 spin_lock(&interrupt_lock);
3098dba0
AJ
1708 tb = env->current_tb;
1709 /* if the cpu is currently executing code, we must unlink it and
1710 all the potentially executing TB */
f76cfe56 1711 if (tb) {
3098dba0
AJ
1712 env->current_tb = NULL;
1713 tb_reset_jump_recursive(tb);
be214e6c 1714 }
cab1b4bd 1715 spin_unlock(&interrupt_lock);
3098dba0
AJ
1716}
1717
97ffbd8d 1718#ifndef CONFIG_USER_ONLY
3098dba0 1719/* mask must never be zero, except for A20 change call */
9349b4f9 1720static void tcg_handle_interrupt(CPUArchState *env, int mask)
3098dba0
AJ
1721{
1722 int old_mask;
be214e6c 1723
2e70f6ef 1724 old_mask = env->interrupt_request;
68a79315 1725 env->interrupt_request |= mask;
3098dba0 1726
8edac960
AL
1727 /*
1728 * If called from iothread context, wake the target cpu in
 1729 * case it's halted.
1730 */
b7680cb6 1731 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1732 qemu_cpu_kick(env);
1733 return;
1734 }
8edac960 1735
2e70f6ef 1736 if (use_icount) {
266910c4 1737 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1738 if (!can_do_io(env)
be214e6c 1739 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1740 cpu_abort(env, "Raised interrupt while not in I/O function");
1741 }
2e70f6ef 1742 } else {
3098dba0 1743 cpu_unlink_tb(env);
ea041c0e
FB
1744 }
1745}
1746
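/* Illustrative sketch (standalone, not part of exec.c): the
 * "(mask & ~old_mask) != 0" test above is true only for interrupt bits
 * that were not already pending before this call, e.g.: */
static void example_newly_raised_bits(void)
{
    int old_mask = 0x05;                  /* bits 0 and 2 already pending */
    int mask     = 0x06;                  /* caller raises bits 1 and 2   */
    int newly_raised = mask & ~old_mask;  /* == 0x02: only bit 1 is new   */
    (void)newly_raised;
}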
ec6959d0
JK
1747CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1748
97ffbd8d
JK
1749#else /* CONFIG_USER_ONLY */
1750
9349b4f9 1751void cpu_interrupt(CPUArchState *env, int mask)
97ffbd8d
JK
1752{
1753 env->interrupt_request |= mask;
1754 cpu_unlink_tb(env);
1755}
1756#endif /* CONFIG_USER_ONLY */
1757
9349b4f9 1758void cpu_reset_interrupt(CPUArchState *env, int mask)
b54ad049
FB
1759{
1760 env->interrupt_request &= ~mask;
1761}
1762
9349b4f9 1763void cpu_exit(CPUArchState *env)
3098dba0
AJ
1764{
1765 env->exit_request = 1;
1766 cpu_unlink_tb(env);
1767}
1768
c7cd6a37 1769const CPULogItem cpu_log_items[] = {
5fafdf24 1770 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1771 "show generated host assembly code for each compiled TB" },
1772 { CPU_LOG_TB_IN_ASM, "in_asm",
1773 "show target assembly code for each compiled TB" },
5fafdf24 1774 { CPU_LOG_TB_OP, "op",
57fec1fe 1775 "show micro ops for each compiled TB" },
f193c797 1776 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1777 "show micro ops "
1778#ifdef TARGET_I386
1779 "before eflags optimization and "
f193c797 1780#endif
e01a1157 1781 "after liveness analysis" },
f193c797
FB
1782 { CPU_LOG_INT, "int",
1783 "show interrupts/exceptions in short format" },
1784 { CPU_LOG_EXEC, "exec",
1785 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1786 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1787 "show CPU state before block translation" },
f193c797
FB
1788#ifdef TARGET_I386
1789 { CPU_LOG_PCALL, "pcall",
1790 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1791 { CPU_LOG_RESET, "cpu_reset",
1792 "show CPU state before CPU resets" },
f193c797 1793#endif
8e3a9fd2 1794#ifdef DEBUG_IOPORT
fd872598
FB
1795 { CPU_LOG_IOPORT, "ioport",
1796 "show all i/o ports accesses" },
8e3a9fd2 1797#endif
f193c797
FB
1798 { 0, NULL, NULL },
1799};
1800
1801static int cmp1(const char *s1, int n, const char *s2)
1802{
1803 if (strlen(s2) != n)
1804 return 0;
1805 return memcmp(s1, s2, n) == 0;
1806}
3b46e624 1807
f193c797
FB
1808/* takes a comma-separated list of log masks. Returns 0 on error. */
1809int cpu_str_to_log_mask(const char *str)
1810{
c7cd6a37 1811 const CPULogItem *item;
f193c797
FB
1812 int mask;
1813 const char *p, *p1;
1814
1815 p = str;
1816 mask = 0;
1817 for(;;) {
1818 p1 = strchr(p, ',');
1819 if (!p1)
1820 p1 = p + strlen(p);
9742bf26
YT
1821 if(cmp1(p,p1-p,"all")) {
1822 for(item = cpu_log_items; item->mask != 0; item++) {
1823 mask |= item->mask;
1824 }
1825 } else {
1826 for(item = cpu_log_items; item->mask != 0; item++) {
1827 if (cmp1(p, p1 - p, item->name))
1828 goto found;
1829 }
1830 return 0;
f193c797 1831 }
f193c797
FB
1832 found:
1833 mask |= item->mask;
1834 if (*p1 != ',')
1835 break;
1836 p = p1 + 1;
1837 }
1838 return mask;
1839}
ea041c0e 1840
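/* Illustrative sketch (standalone, not part of exec.c): cpu_str_to_log_mask()
 * above ORs together the mask of every comma-separated name it recognizes,
 * so "in_asm,exec" yields CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC.  A reduced
 * re-implementation with hypothetical mask values, no "all" keyword and no
 * error handling: */
#include <string.h>   /* strchr/strlen/memcmp for this standalone sketch */

struct example_log_item { int mask; const char *name; };

static const struct example_log_item example_items[] = {
    { 0x02, "in_asm" },   /* hypothetical value */
    { 0x08, "exec"   },   /* hypothetical value */
    { 0, NULL },
};

static int example_str_to_mask(const char *str)
{
    int mask = 0;
    while (*str) {
        const char *end = strchr(str, ',');
        size_t len = end ? (size_t)(end - str) : strlen(str);
        const struct example_log_item *it;
        for (it = example_items; it->mask; it++) {
            if (strlen(it->name) == len && memcmp(str, it->name, len) == 0) {
                mask |= it->mask;
            }
        }
        str += len;
        if (*str == ',') {
            str++;
        }
    }
    return mask;   /* example_str_to_mask("in_asm,exec") == 0x0a */
}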
9349b4f9 1841void cpu_abort(CPUArchState *env, const char *fmt, ...)
7501267e
FB
1842{
1843 va_list ap;
493ae1f0 1844 va_list ap2;
7501267e
FB
1845
1846 va_start(ap, fmt);
493ae1f0 1847 va_copy(ap2, ap);
7501267e
FB
1848 fprintf(stderr, "qemu: fatal: ");
1849 vfprintf(stderr, fmt, ap);
1850 fprintf(stderr, "\n");
1851#ifdef TARGET_I386
7fe48483
FB
1852 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1853#else
1854 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1855#endif
93fcfe39
AL
1856 if (qemu_log_enabled()) {
1857 qemu_log("qemu: fatal: ");
1858 qemu_log_vprintf(fmt, ap2);
1859 qemu_log("\n");
f9373291 1860#ifdef TARGET_I386
93fcfe39 1861 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1862#else
93fcfe39 1863 log_cpu_state(env, 0);
f9373291 1864#endif
31b1a7b4 1865 qemu_log_flush();
93fcfe39 1866 qemu_log_close();
924edcae 1867 }
493ae1f0 1868 va_end(ap2);
f9373291 1869 va_end(ap);
fd052bf6
RV
1870#if defined(CONFIG_USER_ONLY)
1871 {
1872 struct sigaction act;
1873 sigfillset(&act.sa_mask);
1874 act.sa_handler = SIG_DFL;
1875 sigaction(SIGABRT, &act, NULL);
1876 }
1877#endif
7501267e
FB
1878 abort();
1879}
1880
9349b4f9 1881CPUArchState *cpu_copy(CPUArchState *env)
c5be9f08 1882{
9349b4f9
AF
1883 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1884 CPUArchState *next_cpu = new_env->next_cpu;
c5be9f08 1885 int cpu_index = new_env->cpu_index;
5a38f081
AL
1886#if defined(TARGET_HAS_ICE)
1887 CPUBreakpoint *bp;
1888 CPUWatchpoint *wp;
1889#endif
1890
9349b4f9 1891 memcpy(new_env, env, sizeof(CPUArchState));
5a38f081
AL
1892
1893 /* Preserve chaining and index. */
c5be9f08
TS
1894 new_env->next_cpu = next_cpu;
1895 new_env->cpu_index = cpu_index;
5a38f081
AL
1896
1897 /* Clone all break/watchpoints.
1898 Note: Once we support ptrace with hw-debug register access, make sure
1899 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1900 QTAILQ_INIT(&env->breakpoints);
1901 QTAILQ_INIT(&env->watchpoints);
5a38f081 1902#if defined(TARGET_HAS_ICE)
72cf2d4f 1903 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1904 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1905 }
72cf2d4f 1906 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1907 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1908 wp->flags, NULL);
1909 }
1910#endif
1911
c5be9f08
TS
1912 return new_env;
1913}
1914
0124311e
FB
1915#if !defined(CONFIG_USER_ONLY)
1916
9349b4f9 1917static inline void tlb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
5c751e99
EI
1918{
1919 unsigned int i;
1920
1921 /* Discard jump cache entries for any tb which might potentially
1922 overlap the flushed page. */
1923 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1924 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1925 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1926
1927 i = tb_jmp_cache_hash_page(addr);
1928 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1929 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1930}
1931
08738984
IK
1932static CPUTLBEntry s_cputlb_empty_entry = {
1933 .addr_read = -1,
1934 .addr_write = -1,
1935 .addr_code = -1,
1936 .addend = -1,
1937};
1938
771124e1
PM
1939/* NOTE:
1940 * If flush_global is true (the usual case), flush all tlb entries.
1941 * If flush_global is false, flush (at least) all tlb entries not
1942 * marked global.
1943 *
1944 * Since QEMU doesn't currently implement a global/not-global flag
1945 * for tlb entries, at the moment tlb_flush() will also flush all
1946 * tlb entries in the flush_global == false case. This is OK because
1947 * CPU architectures generally permit an implementation to drop
1948 * entries from the TLB at any time, so flushing more entries than
1949 * required is only an efficiency issue, not a correctness issue.
1950 */
9349b4f9 1951void tlb_flush(CPUArchState *env, int flush_global)
33417e70 1952{
33417e70 1953 int i;
0124311e 1954
9fa3e853
FB
1955#if defined(DEBUG_TLB)
1956 printf("tlb_flush:\n");
1957#endif
0124311e
FB
1958 /* must reset current TB so that interrupts cannot modify the
1959 links while we are modifying them */
1960 env->current_tb = NULL;
1961
33417e70 1962 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1963 int mmu_idx;
1964 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1965 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1966 }
33417e70 1967 }
9fa3e853 1968
8a40a180 1969 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1970
d4c430a8
PB
1971 env->tlb_flush_addr = -1;
1972 env->tlb_flush_mask = 0;
e3db7226 1973 tlb_flush_count++;
33417e70
FB
1974}
1975
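/* Illustrative sketch (hypothetical helper, not part of exec.c): a target
 * typically wants to drop every cached translation when its page-table
 * base changes, which is what tlb_flush() with flush_global set does: */
static void example_mmu_base_changed(CPUArchState *env)
{
    tlb_flush(env, 1);   /* flush everything, including "global" entries */
}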
274da6b2 1976static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1977{
5fafdf24 1978 if (addr == (tlb_entry->addr_read &
84b7b8e7 1979 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1980 addr == (tlb_entry->addr_write &
84b7b8e7 1981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1982 addr == (tlb_entry->addr_code &
84b7b8e7 1983 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1984 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1985 }
61382a50
FB
1986}
1987
9349b4f9 1988void tlb_flush_page(CPUArchState *env, target_ulong addr)
33417e70 1989{
8a40a180 1990 int i;
cfde4bd9 1991 int mmu_idx;
0124311e 1992
9fa3e853 1993#if defined(DEBUG_TLB)
108c49b8 1994 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1995#endif
d4c430a8
PB
1996 /* Check if we need to flush due to large pages. */
1997 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1998#if defined(DEBUG_TLB)
1999 printf("tlb_flush_page: forced full flush ("
2000 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2001 env->tlb_flush_addr, env->tlb_flush_mask);
2002#endif
2003 tlb_flush(env, 1);
2004 return;
2005 }
0124311e
FB
2006 /* must reset current TB so that interrupts cannot modify the
2007 links while we are modifying them */
2008 env->current_tb = NULL;
61382a50
FB
2009
2010 addr &= TARGET_PAGE_MASK;
2011 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2012 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2013 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2014
5c751e99 2015 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2016}
2017
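/* Illustrative sketch (standalone, assumed constants): the TLB is a
 * direct-mapped table indexed by the virtual page number modulo
 * CPU_TLB_SIZE, exactly as computed above.  With hypothetical 4 KiB pages
 * (page bits = 12) and 256 entries: */
static unsigned example_tlb_index(unsigned long addr)
{
    const int example_page_bits = 12;    /* assumption: 4 KiB pages */
    const int example_tlb_size  = 256;   /* assumption: 256 entries */
    return (addr >> example_page_bits) & (example_tlb_size - 1);
}
/* example_tlb_index(0x7f001000) == 0x01, example_tlb_index(0x7f002000) == 0x02 */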
9fa3e853
FB
2018/* update the TLBs so that writes to code in the virtual page 'addr'
2019 can be detected */
c227f099 2020static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2021{
5fafdf24 2022 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2023 ram_addr + TARGET_PAGE_SIZE,
2024 CODE_DIRTY_FLAG);
9fa3e853
FB
2025}
2026
9fa3e853 2027/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2028 tested for self modifying code */
9349b4f9 2029static void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
3a7d929e 2030 target_ulong vaddr)
9fa3e853 2031{
f7c11b53 2032 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2033}
2034
7859cc6e
AK
2035static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
2036{
2037 return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
2038}
2039
5fafdf24 2040static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2041 unsigned long start, unsigned long length)
2042{
2043 unsigned long addr;
7859cc6e 2044 if (tlb_is_dirty_ram(tlb_entry)) {
84b7b8e7 2045 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2046 if ((addr - start) < length) {
7859cc6e 2047 tlb_entry->addr_write |= TLB_NOTDIRTY;
1ccde1cb
FB
2048 }
2049 }
2050}
2051
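/* Illustrative sketch (standalone, not part of exec.c): the single unsigned
 * compare "(addr - start) < length" used above is equivalent to
 * "start <= addr && addr < start + length" (as long as start + length does
 * not itself wrap): if addr < start the subtraction wraps to a huge value
 * and the test fails. */
static int example_in_range(unsigned long addr,
                            unsigned long start, unsigned long length)
{
    return (addr - start) < length;
}
/* example_in_range(0x2100, 0x2000, 0x1000) == 1
 * example_in_range(0x1f00, 0x2000, 0x1000) == 0  (0x1f00 - 0x2000 wraps) */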
5579c7f3 2052/* Note: start and end must be within the same ram block. */
c227f099 2053void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2054 int dirty_flags)
1ccde1cb 2055{
9349b4f9 2056 CPUArchState *env;
4f2ac237 2057 unsigned long length, start1;
f7c11b53 2058 int i;
1ccde1cb
FB
2059
2060 start &= TARGET_PAGE_MASK;
2061 end = TARGET_PAGE_ALIGN(end);
2062
2063 length = end - start;
2064 if (length == 0)
2065 return;
f7c11b53 2066 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2067
1ccde1cb
FB
2068 /* we modify the TLB cache so that the dirty bit will be set again
2069 when accessing the range */
b2e0a138 2070 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2071 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2072 address comparisons below. */
b2e0a138 2073 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2074 != (end - 1) - start) {
2075 abort();
2076 }
2077
6a00d601 2078 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2079 int mmu_idx;
2080 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2081 for(i = 0; i < CPU_TLB_SIZE; i++)
2082 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2083 start1, length);
2084 }
6a00d601 2085 }
1ccde1cb
FB
2086}
2087
74576198
AL
2088int cpu_physical_memory_set_dirty_tracking(int enable)
2089{
f6f3fbca 2090 int ret = 0;
74576198 2091 in_migration = enable;
f6f3fbca 2092 return ret;
74576198
AL
2093}
2094
3a7d929e
FB
2095static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2096{
c227f099 2097 ram_addr_t ram_addr;
5579c7f3 2098 void *p;
3a7d929e 2099
7859cc6e 2100 if (tlb_is_dirty_ram(tlb_entry)) {
5579c7f3
PB
2101 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2102 + tlb_entry->addend);
e890261f 2103 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2104 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2105 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2106 }
2107 }
2108}
2109
2110/* update the TLB according to the current state of the dirty bits */
9349b4f9 2111void cpu_tlb_update_dirty(CPUArchState *env)
3a7d929e
FB
2112{
2113 int i;
cfde4bd9
IY
2114 int mmu_idx;
2115 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2116 for(i = 0; i < CPU_TLB_SIZE; i++)
2117 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2118 }
3a7d929e
FB
2119}
2120
0f459d16 2121static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2122{
0f459d16
PB
2123 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2124 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2125}
2126
0f459d16
PB
2127/* update the TLB corresponding to virtual page vaddr
2128 so that it is no longer dirty */
9349b4f9 2129static inline void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
1ccde1cb 2130{
1ccde1cb 2131 int i;
cfde4bd9 2132 int mmu_idx;
1ccde1cb 2133
0f459d16 2134 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2135 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2136 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2137 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2138}
2139
d4c430a8
PB
2140/* Our TLB does not support large pages, so remember the area covered by
2141 large pages and trigger a full TLB flush if these are invalidated. */
9349b4f9 2142static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2143 target_ulong size)
2144{
2145 target_ulong mask = ~(size - 1);
2146
2147 if (env->tlb_flush_addr == (target_ulong)-1) {
2148 env->tlb_flush_addr = vaddr & mask;
2149 env->tlb_flush_mask = mask;
2150 return;
2151 }
2152 /* Extend the existing region to include the new page.
2153 This is a compromise between unnecessary flushes and the cost
2154 of maintaining a full variable size TLB. */
2155 mask &= env->tlb_flush_mask;
2156 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2157 mask <<= 1;
2158 }
2159 env->tlb_flush_addr &= mask;
2160 env->tlb_flush_mask = mask;
2161}
2162
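/* Illustrative sketch (standalone, assumed page sizes): how the tracked
 * large-page region above grows.  Starting with one 2 MiB page at
 * 0x00200000 and then adding another at 0x00a00000, the mask widens until
 * a single aligned region covers both pages: */
static void example_large_page_merge(void)
{
    unsigned long flush_addr = 0x00200000;
    unsigned long flush_mask = ~(0x200000UL - 1);   /* existing 2 MiB region */
    unsigned long vaddr = 0x00a00000;
    unsigned long mask  = ~(0x200000UL - 1);        /* new 2 MiB page */

    mask &= flush_mask;
    while (((flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;                                 /* widen the region */
    }
    flush_addr &= mask;
    /* result: flush_addr == 0x00000000 and mask == ~(16 MiB - 1), i.e. the
     * region now spans 0x00000000 .. 0x00ffffff and any page flush inside
     * it triggers a full tlb_flush(). */
}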
06ef3525 2163static bool is_ram_rom(MemoryRegionSection *s)
1d393fa2 2164{
06ef3525 2165 return memory_region_is_ram(s->mr);
1d393fa2
AK
2166}
2167
06ef3525 2168static bool is_romd(MemoryRegionSection *s)
75c578dc 2169{
06ef3525 2170 MemoryRegion *mr = s->mr;
75c578dc 2171
75c578dc
AK
2172 return mr->rom_device && mr->readable;
2173}
2174
06ef3525 2175static bool is_ram_rom_romd(MemoryRegionSection *s)
1d393fa2 2176{
06ef3525 2177 return is_ram_rom(s) || is_romd(s);
1d393fa2
AK
2178}
2179
d4c430a8
PB
2180/* Add a new TLB entry. At most one entry for a given virtual address
2181 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2182 supplied size is only used by tlb_flush_page. */
9349b4f9 2183void tlb_set_page(CPUArchState *env, target_ulong vaddr,
d4c430a8
PB
2184 target_phys_addr_t paddr, int prot,
2185 int mmu_idx, target_ulong size)
9fa3e853 2186{
f3705d53 2187 MemoryRegionSection *section;
9fa3e853 2188 unsigned int index;
4f2ac237 2189 target_ulong address;
0f459d16 2190 target_ulong code_address;
355b1943 2191 unsigned long addend;
84b7b8e7 2192 CPUTLBEntry *te;
a1d1bb31 2193 CPUWatchpoint *wp;
c227f099 2194 target_phys_addr_t iotlb;
9fa3e853 2195
d4c430a8
PB
2196 assert(size >= TARGET_PAGE_SIZE);
2197 if (size != TARGET_PAGE_SIZE) {
2198 tlb_add_large_page(env, vaddr, size);
2199 }
06ef3525 2200 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853 2201#if defined(DEBUG_TLB)
7fd3f494
SW
2202 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2203 " prot=%x idx=%d pd=0x%08lx\n",
2204 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2205#endif
2206
0f459d16 2207 address = vaddr;
f3705d53 2208 if (!is_ram_rom_romd(section)) {
0f459d16
PB
2209 /* IO memory case (romd handled later) */
2210 address |= TLB_MMIO;
2211 }
f3705d53
AK
2212 if (is_ram_rom_romd(section)) {
2213 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2214 + section_addr(section, paddr);
06ef3525
AK
2215 } else {
2216 addend = 0;
2217 }
f3705d53 2218 if (is_ram_rom(section)) {
0f459d16 2219 /* Normal RAM. */
f3705d53
AK
2220 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2221 + section_addr(section, paddr);
2222 if (!section->readonly)
aa102231 2223 iotlb |= phys_section_notdirty;
0f459d16 2224 else
aa102231 2225 iotlb |= phys_section_rom;
0f459d16 2226 } else {
ccbb4d44 2227 /* IO handlers are currently passed a physical address.
0f459d16
PB
2228 It would be nice to pass an offset from the base address
2229 of that region. This would avoid having to special case RAM,
2230 and avoid full address decoding in every device.
2231 We can't use the high bits of pd for this because
2232 IO_MEM_ROMD uses these as a ram address. */
aa102231 2233 iotlb = section - phys_sections;
f3705d53 2234 iotlb += section_addr(section, paddr);
0f459d16
PB
2235 }
2236
2237 code_address = address;
2238 /* Make accesses to pages with watchpoints go via the
2239 watchpoint trap routines. */
72cf2d4f 2240 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2241 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2242 /* Avoid trapping reads of pages with a write breakpoint. */
2243 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
aa102231 2244 iotlb = phys_section_watch + paddr;
bf298f83
JK
2245 address |= TLB_MMIO;
2246 break;
2247 }
6658ffb8 2248 }
0f459d16 2249 }
d79acba4 2250
0f459d16
PB
2251 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2252 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2253 te = &env->tlb_table[mmu_idx][index];
2254 te->addend = addend - vaddr;
2255 if (prot & PAGE_READ) {
2256 te->addr_read = address;
2257 } else {
2258 te->addr_read = -1;
2259 }
5c751e99 2260
0f459d16
PB
2261 if (prot & PAGE_EXEC) {
2262 te->addr_code = code_address;
2263 } else {
2264 te->addr_code = -1;
2265 }
2266 if (prot & PAGE_WRITE) {
f3705d53
AK
2267 if ((memory_region_is_ram(section->mr) && section->readonly)
2268 || is_romd(section)) {
0f459d16
PB
2269 /* Write access calls the I/O callback. */
2270 te->addr_write = address | TLB_MMIO;
f3705d53 2271 } else if (memory_region_is_ram(section->mr)
06ef3525 2272 && !cpu_physical_memory_is_dirty(
f3705d53
AK
2273 section->mr->ram_addr
2274 + section_addr(section, paddr))) {
0f459d16 2275 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2276 } else {
0f459d16 2277 te->addr_write = address;
9fa3e853 2278 }
0f459d16
PB
2279 } else {
2280 te->addr_write = -1;
9fa3e853 2281 }
9fa3e853
FB
2282}
2283
0124311e
FB
2284#else
2285
9349b4f9 2286void tlb_flush(CPUArchState *env, int flush_global)
0124311e
FB
2287{
2288}
2289
9349b4f9 2290void tlb_flush_page(CPUArchState *env, target_ulong addr)
0124311e
FB
2291{
2292}
2293
edf8e2af
MW
2294/*
2295 * Walks guest process memory "regions" one by one
2296 * and calls callback function 'fn' for each region.
2297 */
5cd2c5b6
RH
2298
2299struct walk_memory_regions_data
2300{
2301 walk_memory_regions_fn fn;
2302 void *priv;
2303 unsigned long start;
2304 int prot;
2305};
2306
2307static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2308 abi_ulong end, int new_prot)
5cd2c5b6
RH
2309{
2310 if (data->start != -1ul) {
2311 int rc = data->fn(data->priv, data->start, end, data->prot);
2312 if (rc != 0) {
2313 return rc;
2314 }
2315 }
2316
2317 data->start = (new_prot ? end : -1ul);
2318 data->prot = new_prot;
2319
2320 return 0;
2321}
2322
2323static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2324 abi_ulong base, int level, void **lp)
5cd2c5b6 2325{
b480d9b7 2326 abi_ulong pa;
5cd2c5b6
RH
2327 int i, rc;
2328
2329 if (*lp == NULL) {
2330 return walk_memory_regions_end(data, base, 0);
2331 }
2332
2333 if (level == 0) {
2334 PageDesc *pd = *lp;
7296abac 2335 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2336 int prot = pd[i].flags;
2337
2338 pa = base | (i << TARGET_PAGE_BITS);
2339 if (prot != data->prot) {
2340 rc = walk_memory_regions_end(data, pa, prot);
2341 if (rc != 0) {
2342 return rc;
9fa3e853 2343 }
9fa3e853 2344 }
5cd2c5b6
RH
2345 }
2346 } else {
2347 void **pp = *lp;
7296abac 2348 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2349 pa = base | ((abi_ulong)i <<
2350 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2351 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2352 if (rc != 0) {
2353 return rc;
2354 }
2355 }
2356 }
2357
2358 return 0;
2359}
2360
2361int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2362{
2363 struct walk_memory_regions_data data;
2364 unsigned long i;
2365
2366 data.fn = fn;
2367 data.priv = priv;
2368 data.start = -1ul;
2369 data.prot = 0;
2370
2371 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2372 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2373 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2374 if (rc != 0) {
2375 return rc;
9fa3e853 2376 }
33417e70 2377 }
5cd2c5b6
RH
2378
2379 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2380}
2381
b480d9b7
PB
2382static int dump_region(void *priv, abi_ulong start,
2383 abi_ulong end, unsigned long prot)
edf8e2af
MW
2384{
2385 FILE *f = (FILE *)priv;
2386
b480d9b7
PB
2387 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2388 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2389 start, end, end - start,
2390 ((prot & PAGE_READ) ? 'r' : '-'),
2391 ((prot & PAGE_WRITE) ? 'w' : '-'),
2392 ((prot & PAGE_EXEC) ? 'x' : '-'));
2393
2394 return (0);
2395}
2396
2397/* dump memory mappings */
2398void page_dump(FILE *f)
2399{
2400 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2401 "start", "end", "size", "prot");
2402 walk_memory_regions(f, dump_region);
33417e70
FB
2403}
2404
53a5960a 2405int page_get_flags(target_ulong address)
33417e70 2406{
9fa3e853
FB
2407 PageDesc *p;
2408
2409 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2410 if (!p)
9fa3e853
FB
2411 return 0;
2412 return p->flags;
2413}
2414
376a7909
RH
2415/* Modify the flags of a page and invalidate the code if necessary.
2416 The flag PAGE_WRITE_ORG is positioned automatically depending
2417 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2418void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2419{
376a7909
RH
2420 target_ulong addr, len;
2421
2422 /* This function should never be called with addresses outside the
2423 guest address space. If this assert fires, it probably indicates
2424 a missing call to h2g_valid. */
b480d9b7
PB
2425#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2426 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2427#endif
2428 assert(start < end);
9fa3e853
FB
2429
2430 start = start & TARGET_PAGE_MASK;
2431 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2432
2433 if (flags & PAGE_WRITE) {
9fa3e853 2434 flags |= PAGE_WRITE_ORG;
376a7909
RH
2435 }
2436
2437 for (addr = start, len = end - start;
2438 len != 0;
2439 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2440 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2441
2442 /* If the write protection bit is set, then we invalidate
2443 the code inside. */
5fafdf24 2444 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2445 (flags & PAGE_WRITE) &&
2446 p->first_tb) {
d720b93d 2447 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2448 }
2449 p->flags = flags;
2450 }
33417e70
FB
2451}
2452
3d97b40b
TS
2453int page_check_range(target_ulong start, target_ulong len, int flags)
2454{
2455 PageDesc *p;
2456 target_ulong end;
2457 target_ulong addr;
2458
376a7909
RH
2459 /* This function should never be called with addresses outside the
2460 guest address space. If this assert fires, it probably indicates
2461 a missing call to h2g_valid. */
338e9e6c
BS
2462#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2463 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2464#endif
2465
3e0650a9
RH
2466 if (len == 0) {
2467 return 0;
2468 }
376a7909
RH
2469 if (start + len - 1 < start) {
2470 /* We've wrapped around. */
55f280c9 2471 return -1;
376a7909 2472 }
55f280c9 2473
3d97b40b
TS
2474 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2475 start = start & TARGET_PAGE_MASK;
2476
376a7909
RH
2477 for (addr = start, len = end - start;
2478 len != 0;
2479 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2480 p = page_find(addr >> TARGET_PAGE_BITS);
2481 if( !p )
2482 return -1;
2483 if( !(p->flags & PAGE_VALID) )
2484 return -1;
2485
dae3270c 2486 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2487 return -1;
dae3270c
FB
2488 if (flags & PAGE_WRITE) {
2489 if (!(p->flags & PAGE_WRITE_ORG))
2490 return -1;
2491 /* unprotect the page if it was put read-only because it
2492 contains translated code */
2493 if (!(p->flags & PAGE_WRITE)) {
2494 if (!page_unprotect(addr, 0, NULL))
2495 return -1;
2496 }
2497 return 0;
2498 }
3d97b40b
TS
2499 }
2500 return 0;
2501}
2502
9fa3e853 2503/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2504 page. Return TRUE if the fault was successfully handled. */
53a5960a 2505int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2506{
45d679d6
AJ
2507 unsigned int prot;
2508 PageDesc *p;
53a5960a 2509 target_ulong host_start, host_end, addr;
9fa3e853 2510
c8a706fe
PB
2511 /* Technically this isn't safe inside a signal handler. However we
2512 know this only ever happens in a synchronous SEGV handler, so in
2513 practice it seems to be ok. */
2514 mmap_lock();
2515
45d679d6
AJ
2516 p = page_find(address >> TARGET_PAGE_BITS);
2517 if (!p) {
c8a706fe 2518 mmap_unlock();
9fa3e853 2519 return 0;
c8a706fe 2520 }
45d679d6 2521
9fa3e853
FB
2522 /* if the page was really writable, then we change its
2523 protection back to writable */
45d679d6
AJ
2524 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2525 host_start = address & qemu_host_page_mask;
2526 host_end = host_start + qemu_host_page_size;
2527
2528 prot = 0;
2529 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2530 p = page_find(addr >> TARGET_PAGE_BITS);
2531 p->flags |= PAGE_WRITE;
2532 prot |= p->flags;
2533
9fa3e853
FB
2534 /* and since the content will be modified, we must invalidate
2535 the corresponding translated code. */
45d679d6 2536 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2537#ifdef DEBUG_TB_CHECK
45d679d6 2538 tb_invalidate_check(addr);
9fa3e853 2539#endif
9fa3e853 2540 }
45d679d6
AJ
2541 mprotect((void *)g2h(host_start), qemu_host_page_size,
2542 prot & PAGE_BITS);
2543
2544 mmap_unlock();
2545 return 1;
9fa3e853 2546 }
c8a706fe 2547 mmap_unlock();
9fa3e853
FB
2548 return 0;
2549}
2550
9349b4f9 2551static inline void tlb_set_dirty(CPUArchState *env,
6a00d601 2552 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2553{
2554}
9fa3e853
FB
2555#endif /* defined(CONFIG_USER_ONLY) */
2556
e2eef170 2557#if !defined(CONFIG_USER_ONLY)
8da3ff18 2558
c04b2b78
PB
2559#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2560typedef struct subpage_t {
70c68e44 2561 MemoryRegion iomem;
c04b2b78 2562 target_phys_addr_t base;
5312bd8b 2563 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2564} subpage_t;
2565
c227f099 2566static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2567 uint16_t section);
0f0cb164 2568static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2569static void destroy_page_desc(uint16_t section_index)
54688b1e 2570{
5312bd8b
AK
2571 MemoryRegionSection *section = &phys_sections[section_index];
2572 MemoryRegion *mr = section->mr;
54688b1e
AK
2573
2574 if (mr->subpage) {
2575 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2576 memory_region_destroy(&subpage->iomem);
2577 g_free(subpage);
2578 }
2579}
2580
4346ae3e 2581static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2582{
2583 unsigned i;
d6f2ea22 2584 PhysPageEntry *p;
54688b1e 2585
c19e8800 2586 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2587 return;
2588 }
2589
c19e8800 2590 p = phys_map_nodes[lp->ptr];
4346ae3e 2591 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2592 if (!p[i].is_leaf) {
54688b1e 2593 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2594 } else {
c19e8800 2595 destroy_page_desc(p[i].ptr);
54688b1e 2596 }
54688b1e 2597 }
07f07b31 2598 lp->is_leaf = 0;
c19e8800 2599 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2600}
2601
2602static void destroy_all_mappings(void)
2603{
3eef53df 2604 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2605 phys_map_nodes_reset();
54688b1e
AK
2606}
2607
5312bd8b
AK
2608static uint16_t phys_section_add(MemoryRegionSection *section)
2609{
2610 if (phys_sections_nb == phys_sections_nb_alloc) {
2611 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2612 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2613 phys_sections_nb_alloc);
2614 }
2615 phys_sections[phys_sections_nb] = *section;
2616 return phys_sections_nb++;
2617}
2618
2619static void phys_sections_clear(void)
2620{
2621 phys_sections_nb = 0;
2622}
2623
8f2498f9
MT
2624/* register physical memory.
2625 For RAM, 'size' must be a multiple of the target page size.
2626 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2627 io memory page. The address used when calling the IO function is
2628 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2629 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2630 before calculating this offset. This should not be a problem unless
2631 the low bits of start_addr and region_offset differ. */
0f0cb164
AK
2632static void register_subpage(MemoryRegionSection *section)
2633{
2634 subpage_t *subpage;
2635 target_phys_addr_t base = section->offset_within_address_space
2636 & TARGET_PAGE_MASK;
f3705d53 2637 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2638 MemoryRegionSection subsection = {
2639 .offset_within_address_space = base,
2640 .size = TARGET_PAGE_SIZE,
2641 };
0f0cb164
AK
2642 target_phys_addr_t start, end;
2643
f3705d53 2644 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2645
f3705d53 2646 if (!(existing->mr->subpage)) {
0f0cb164
AK
2647 subpage = subpage_init(base);
2648 subsection.mr = &subpage->iomem;
2999097b
AK
2649 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2650 phys_section_add(&subsection));
0f0cb164 2651 } else {
f3705d53 2652 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2653 }
2654 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2655 end = start + section->size;
2656 subpage_register(subpage, start, end, phys_section_add(section));
2657}
2658
2659
2660static void register_multipage(MemoryRegionSection *section)
33417e70 2661{
dd81124b
AK
2662 target_phys_addr_t start_addr = section->offset_within_address_space;
2663 ram_addr_t size = section->size;
2999097b 2664 target_phys_addr_t addr;
5312bd8b 2665 uint16_t section_index = phys_section_add(section);
dd81124b 2666
3b8e6a2d 2667 assert(size);
f6f3fbca 2668
3b8e6a2d 2669 addr = start_addr;
2999097b
AK
2670 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2671 section_index);
33417e70
FB
2672}
2673
0f0cb164
AK
2674void cpu_register_physical_memory_log(MemoryRegionSection *section,
2675 bool readonly)
2676{
2677 MemoryRegionSection now = *section, remain = *section;
2678
2679 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2680 || (now.size < TARGET_PAGE_SIZE)) {
2681 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2682 - now.offset_within_address_space,
2683 now.size);
2684 register_subpage(&now);
2685 remain.size -= now.size;
2686 remain.offset_within_address_space += now.size;
2687 remain.offset_within_region += now.size;
2688 }
2689 now = remain;
2690 now.size &= TARGET_PAGE_MASK;
2691 if (now.size) {
2692 register_multipage(&now);
2693 remain.size -= now.size;
2694 remain.offset_within_address_space += now.size;
2695 remain.offset_within_region += now.size;
2696 }
2697 now = remain;
2698 if (now.size) {
2699 register_subpage(&now);
2700 }
2701}
2702
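/* Illustrative sketch (standalone, assumed 4 KiB pages): how the code above
 * carves one section into a sub-page head, a page-aligned middle and a
 * sub-page tail.  For a hypothetical section at address 0x1234 of size
 * 0x5000: */
static void example_split_section(void)
{
    const unsigned long page = 0x1000;              /* assumption */
    unsigned long addr = 0x1234, size = 0x5000;

    unsigned long head = ((addr + page - 1) & ~(page - 1)) - addr;  /* 0x0dcc */
    addr += head;  size -= head;        /* subpage:   [0x1234, 0x2000) */

    unsigned long middle = size & ~(page - 1);                      /* 0x4000 */
    addr += middle; size -= middle;     /* multipage: [0x2000, 0x6000) */

    (void)size;                         /* subpage:   [0x6000, 0x6234) */
}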
2703
c227f099 2704void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2705{
2706 if (kvm_enabled())
2707 kvm_coalesce_mmio_region(addr, size);
2708}
2709
c227f099 2710void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2711{
2712 if (kvm_enabled())
2713 kvm_uncoalesce_mmio_region(addr, size);
2714}
2715
62a2744c
SY
2716void qemu_flush_coalesced_mmio_buffer(void)
2717{
2718 if (kvm_enabled())
2719 kvm_flush_coalesced_mmio_buffer();
2720}
2721
c902760f
MT
2722#if defined(__linux__) && !defined(TARGET_S390X)
2723
2724#include <sys/vfs.h>
2725
2726#define HUGETLBFS_MAGIC 0x958458f6
2727
2728static long gethugepagesize(const char *path)
2729{
2730 struct statfs fs;
2731 int ret;
2732
2733 do {
9742bf26 2734 ret = statfs(path, &fs);
c902760f
MT
2735 } while (ret != 0 && errno == EINTR);
2736
2737 if (ret != 0) {
9742bf26
YT
2738 perror(path);
2739 return 0;
c902760f
MT
2740 }
2741
2742 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2743 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2744
2745 return fs.f_bsize;
2746}
2747
04b16653
AW
2748static void *file_ram_alloc(RAMBlock *block,
2749 ram_addr_t memory,
2750 const char *path)
c902760f
MT
2751{
2752 char *filename;
2753 void *area;
2754 int fd;
2755#ifdef MAP_POPULATE
2756 int flags;
2757#endif
2758 unsigned long hpagesize;
2759
2760 hpagesize = gethugepagesize(path);
2761 if (!hpagesize) {
9742bf26 2762 return NULL;
c902760f
MT
2763 }
2764
2765 if (memory < hpagesize) {
2766 return NULL;
2767 }
2768
2769 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2770 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2771 return NULL;
2772 }
2773
2774 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2775 return NULL;
c902760f
MT
2776 }
2777
2778 fd = mkstemp(filename);
2779 if (fd < 0) {
9742bf26
YT
2780 perror("unable to create backing store for hugepages");
2781 free(filename);
2782 return NULL;
c902760f
MT
2783 }
2784 unlink(filename);
2785 free(filename);
2786
2787 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2788
2789 /*
2790 * ftruncate is not supported by hugetlbfs in older
2791 * hosts, so don't bother bailing out on errors.
2792 * If anything goes wrong with it under other filesystems,
2793 * mmap will fail.
2794 */
2795 if (ftruncate(fd, memory))
9742bf26 2796 perror("ftruncate");
c902760f
MT
2797
2798#ifdef MAP_POPULATE
2799 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2800 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2801 * to sidestep this quirk.
2802 */
2803 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2804 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2805#else
2806 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2807#endif
2808 if (area == MAP_FAILED) {
9742bf26
YT
2809 perror("file_ram_alloc: can't mmap RAM pages");
2810 close(fd);
2811 return (NULL);
c902760f 2812 }
04b16653 2813 block->fd = fd;
c902760f
MT
2814 return area;
2815}
2816#endif
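/* Illustrative sketch (standalone, not part of exec.c): the expression
 * "(memory + hpagesize - 1) & ~(hpagesize - 1)" used in file_ram_alloc()
 * rounds the requested size up to a whole number of huge pages (hpagesize
 * is a power of two in practice), e.g. with 2 MiB pages: */
static unsigned long example_round_up(unsigned long memory,
                                      unsigned long hpagesize)
{
    return (memory + hpagesize - 1) & ~(hpagesize - 1);
}
/* example_round_up(5 * 1024 * 1024, 2 * 1024 * 1024) == 0x600000 (6 MiB) */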
2817
d17b5288 2818static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2819{
2820 RAMBlock *block, *next_block;
3e837b2c 2821 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2822
2823 if (QLIST_EMPTY(&ram_list.blocks))
2824 return 0;
2825
2826 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2827 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2828
2829 end = block->offset + block->length;
2830
2831 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2832 if (next_block->offset >= end) {
2833 next = MIN(next, next_block->offset);
2834 }
2835 }
2836 if (next - end >= size && next - end < mingap) {
3e837b2c 2837 offset = end;
04b16653
AW
2838 mingap = next - end;
2839 }
2840 }
3e837b2c
AW
2841
2842 if (offset == RAM_ADDR_MAX) {
2843 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2844 (uint64_t)size);
2845 abort();
2846 }
2847
04b16653
AW
2848 return offset;
2849}
2850
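/* Illustrative sketch (standalone, not part of exec.c): find_ram_offset()
 * above is a best-fit search - of all gaps between existing blocks that can
 * hold the request, it picks the smallest.  With hypothetical blocks
 * [0x0000,0x4000) and [0x8000,0x9000) and a 0x1000-byte request, the
 * bounded gap at 0x4000 wins over the unbounded space after the last
 * block: */
static unsigned long example_find_offset(void)
{
    struct { unsigned long offset, length; } blocks[] = {
        { 0x0000, 0x4000 },
        { 0x8000, 0x1000 },
    };
    const unsigned long size = 0x1000, addr_max = ~0UL;
    unsigned long best = addr_max, mingap = addr_max;
    int i, j;

    for (i = 0; i < 2; i++) {
        unsigned long end = blocks[i].offset + blocks[i].length;
        unsigned long next = addr_max;
        for (j = 0; j < 2; j++) {
            if (blocks[j].offset >= end && blocks[j].offset < next) {
                next = blocks[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;    /* == 0x4000 for this layout */
}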
2851static ram_addr_t last_ram_offset(void)
d17b5288
AW
2852{
2853 RAMBlock *block;
2854 ram_addr_t last = 0;
2855
2856 QLIST_FOREACH(block, &ram_list.blocks, next)
2857 last = MAX(last, block->offset + block->length);
2858
2859 return last;
2860}
2861
c5705a77 2862void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2863{
2864 RAMBlock *new_block, *block;
2865
c5705a77
AK
2866 new_block = NULL;
2867 QLIST_FOREACH(block, &ram_list.blocks, next) {
2868 if (block->offset == addr) {
2869 new_block = block;
2870 break;
2871 }
2872 }
2873 assert(new_block);
2874 assert(!new_block->idstr[0]);
84b89d78
CM
2875
2876 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2877 char *id = dev->parent_bus->info->get_dev_path(dev);
2878 if (id) {
2879 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2880 g_free(id);
84b89d78
CM
2881 }
2882 }
2883 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2884
2885 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2886 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2887 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2888 new_block->idstr);
2889 abort();
2890 }
2891 }
c5705a77
AK
2892}
2893
2894ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2895 MemoryRegion *mr)
2896{
2897 RAMBlock *new_block;
2898
2899 size = TARGET_PAGE_ALIGN(size);
2900 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2901
7c637366 2902 new_block->mr = mr;
432d268c 2903 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2904 if (host) {
2905 new_block->host = host;
cd19cfa2 2906 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2907 } else {
2908 if (mem_path) {
c902760f 2909#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2910 new_block->host = file_ram_alloc(new_block, size, mem_path);
2911 if (!new_block->host) {
2912 new_block->host = qemu_vmalloc(size);
e78815a5 2913 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2914 }
c902760f 2915#else
6977dfe6
YT
2916 fprintf(stderr, "-mem-path option unsupported\n");
2917 exit(1);
c902760f 2918#endif
6977dfe6 2919 } else {
6b02494d 2920#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2921 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2922 a system-defined value, which is at least 256GB. Larger systems
2923 have larger values. We put the guest between the end of the data
2924 segment (system break) and this value. We use 32GB as a base to
2925 have enough room for the system break to grow. */
2926 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2927 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2928 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2929 if (new_block->host == MAP_FAILED) {
2930 fprintf(stderr, "Allocating RAM failed\n");
2931 abort();
2932 }
6b02494d 2933#else
868bb33f 2934 if (xen_enabled()) {
fce537d4 2935 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2936 } else {
2937 new_block->host = qemu_vmalloc(size);
2938 }
6b02494d 2939#endif
e78815a5 2940 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2941 }
c902760f 2942 }
94a6b54f
PB
2943 new_block->length = size;
2944
f471a17e 2945 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2946
7267c094 2947 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2948 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2949 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2950 0xff, size >> TARGET_PAGE_BITS);
2951
6f0437e8
JK
2952 if (kvm_enabled())
2953 kvm_setup_guest_memory(new_block->host, size);
2954
94a6b54f
PB
2955 return new_block->offset;
2956}
e9a1ab19 2957
c5705a77 2958ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2959{
c5705a77 2960 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2961}
2962
1f2e98b6
AW
2963void qemu_ram_free_from_ptr(ram_addr_t addr)
2964{
2965 RAMBlock *block;
2966
2967 QLIST_FOREACH(block, &ram_list.blocks, next) {
2968 if (addr == block->offset) {
2969 QLIST_REMOVE(block, next);
7267c094 2970 g_free(block);
1f2e98b6
AW
2971 return;
2972 }
2973 }
2974}
2975
c227f099 2976void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2977{
04b16653
AW
2978 RAMBlock *block;
2979
2980 QLIST_FOREACH(block, &ram_list.blocks, next) {
2981 if (addr == block->offset) {
2982 QLIST_REMOVE(block, next);
cd19cfa2
HY
2983 if (block->flags & RAM_PREALLOC_MASK) {
2984 ;
2985 } else if (mem_path) {
04b16653
AW
2986#if defined (__linux__) && !defined(TARGET_S390X)
2987 if (block->fd) {
2988 munmap(block->host, block->length);
2989 close(block->fd);
2990 } else {
2991 qemu_vfree(block->host);
2992 }
fd28aa13
JK
2993#else
2994 abort();
04b16653
AW
2995#endif
2996 } else {
2997#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2998 munmap(block->host, block->length);
2999#else
868bb33f 3000 if (xen_enabled()) {
e41d7c69 3001 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3002 } else {
3003 qemu_vfree(block->host);
3004 }
04b16653
AW
3005#endif
3006 }
7267c094 3007 g_free(block);
04b16653
AW
3008 return;
3009 }
3010 }
3011
e9a1ab19
FB
3012}
3013
cd19cfa2
HY
3014#ifndef _WIN32
3015void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3016{
3017 RAMBlock *block;
3018 ram_addr_t offset;
3019 int flags;
3020 void *area, *vaddr;
3021
3022 QLIST_FOREACH(block, &ram_list.blocks, next) {
3023 offset = addr - block->offset;
3024 if (offset < block->length) {
3025 vaddr = block->host + offset;
3026 if (block->flags & RAM_PREALLOC_MASK) {
3027 ;
3028 } else {
3029 flags = MAP_FIXED;
3030 munmap(vaddr, length);
3031 if (mem_path) {
3032#if defined(__linux__) && !defined(TARGET_S390X)
3033 if (block->fd) {
3034#ifdef MAP_POPULATE
3035 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3036 MAP_PRIVATE;
3037#else
3038 flags |= MAP_PRIVATE;
3039#endif
3040 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3041 flags, block->fd, offset);
3042 } else {
3043 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3044 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3045 flags, -1, 0);
3046 }
fd28aa13
JK
3047#else
3048 abort();
cd19cfa2
HY
3049#endif
3050 } else {
3051#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3052 flags |= MAP_SHARED | MAP_ANONYMOUS;
3053 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3054 flags, -1, 0);
3055#else
3056 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3057 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3058 flags, -1, 0);
3059#endif
3060 }
3061 if (area != vaddr) {
f15fbc4b
AP
3062 fprintf(stderr, "Could not remap addr: "
3063 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3064 length, addr);
3065 exit(1);
3066 }
3067 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3068 }
3069 return;
3070 }
3071 }
3072}
3073#endif /* !_WIN32 */
3074
dc828ca1 3075/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3076 With the exception of the softmmu code in this file, this should
3077 only be used for local memory (e.g. video ram) that the device owns,
3078 and knows it isn't going to access beyond the end of the block.
3079
3080 It should not be used for general purpose DMA.
3081 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3082 */
c227f099 3083void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3084{
94a6b54f
PB
3085 RAMBlock *block;
3086
f471a17e
AW
3087 QLIST_FOREACH(block, &ram_list.blocks, next) {
3088 if (addr - block->offset < block->length) {
7d82af38
VP
3089 /* Move this entry to the start of the list. */
3090 if (block != QLIST_FIRST(&ram_list.blocks)) {
3091 QLIST_REMOVE(block, next);
3092 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3093 }
868bb33f 3094 if (xen_enabled()) {
432d268c
JN
3095 /* We need to check if the requested address is in the RAM
3096 * because we don't want to map the entire memory in QEMU.
712c2b41 3097 * In that case just map until the end of the page.
432d268c
JN
3098 */
3099 if (block->offset == 0) {
e41d7c69 3100 return xen_map_cache(addr, 0, 0);
432d268c 3101 } else if (block->host == NULL) {
e41d7c69
JK
3102 block->host =
3103 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3104 }
3105 }
f471a17e
AW
3106 return block->host + (addr - block->offset);
3107 }
94a6b54f 3108 }
f471a17e
AW
3109
3110 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3111 abort();
3112
3113 return NULL;
dc828ca1
PB
3114}
3115
b2e0a138
MT
3116/* Return a host pointer to ram allocated with qemu_ram_alloc.
3117 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3118 */
3119void *qemu_safe_ram_ptr(ram_addr_t addr)
3120{
3121 RAMBlock *block;
3122
3123 QLIST_FOREACH(block, &ram_list.blocks, next) {
3124 if (addr - block->offset < block->length) {
868bb33f 3125 if (xen_enabled()) {
432d268c
JN
3126 /* We need to check if the requested address is in the RAM
3127 * because we don't want to map the entire memory in QEMU.
712c2b41 3128 * In that case just map until the end of the page.
432d268c
JN
3129 */
3130 if (block->offset == 0) {
e41d7c69 3131 return xen_map_cache(addr, 0, 0);
432d268c 3132 } else if (block->host == NULL) {
e41d7c69
JK
3133 block->host =
3134 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3135 }
3136 }
b2e0a138
MT
3137 return block->host + (addr - block->offset);
3138 }
3139 }
3140
3141 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3142 abort();
3143
3144 return NULL;
3145}
3146
38bee5dc
SS
3147/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3148 * but takes a size argument */
8ab934f9 3149void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3150{
8ab934f9
SS
3151 if (*size == 0) {
3152 return NULL;
3153 }
868bb33f 3154 if (xen_enabled()) {
e41d7c69 3155 return xen_map_cache(addr, *size, 1);
868bb33f 3156 } else {
38bee5dc
SS
3157 RAMBlock *block;
3158
3159 QLIST_FOREACH(block, &ram_list.blocks, next) {
3160 if (addr - block->offset < block->length) {
3161 if (addr - block->offset + *size > block->length)
3162 *size = block->length - addr + block->offset;
3163 return block->host + (addr - block->offset);
3164 }
3165 }
3166
3167 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3168 abort();
38bee5dc
SS
3169 }
3170}
3171
050a0ddf
AP
3172void qemu_put_ram_ptr(void *addr)
3173{
3174 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3175}
3176
e890261f 3177int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3178{
94a6b54f
PB
3179 RAMBlock *block;
3180 uint8_t *host = ptr;
3181
868bb33f 3182 if (xen_enabled()) {
e41d7c69 3183 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3184 return 0;
3185 }
3186
f471a17e 3187 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3188 /* This case append when the block is not mapped. */
3189 if (block->host == NULL) {
3190 continue;
3191 }
f471a17e 3192 if (host - block->host < block->length) {
e890261f
MT
3193 *ram_addr = block->offset + (host - block->host);
3194 return 0;
f471a17e 3195 }
94a6b54f 3196 }
432d268c 3197
e890261f
MT
3198 return -1;
3199}
f471a17e 3200
e890261f
MT
3201/* Some of the softmmu routines need to translate from a host pointer
3202 (typically a TLB entry) back to a ram offset. */
3203ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3204{
3205 ram_addr_t ram_addr;
f471a17e 3206
e890261f
MT
3207 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3208 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3209 abort();
3210 }
3211 return ram_addr;
5579c7f3
PB
3212}
3213
0e0df1e2
AK
3214static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3215 unsigned size)
e18231a3
BS
3216{
3217#ifdef DEBUG_UNASSIGNED
3218 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3219#endif
5b450407 3220#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3221 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3222#endif
3223 return 0;
3224}
3225
0e0df1e2
AK
3226static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3227 uint64_t val, unsigned size)
e18231a3
BS
3228{
3229#ifdef DEBUG_UNASSIGNED
0e0df1e2 3230 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3231#endif
5b450407 3232#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3233 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3234#endif
33417e70
FB
3235}
3236
0e0df1e2
AK
3237static const MemoryRegionOps unassigned_mem_ops = {
3238 .read = unassigned_mem_read,
3239 .write = unassigned_mem_write,
3240 .endianness = DEVICE_NATIVE_ENDIAN,
3241};
e18231a3 3242
0e0df1e2
AK
3243static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3244 unsigned size)
e18231a3 3245{
0e0df1e2 3246 abort();
e18231a3
BS
3247}
3248
0e0df1e2
AK
3249static void error_mem_write(void *opaque, target_phys_addr_t addr,
3250 uint64_t value, unsigned size)
e18231a3 3251{
0e0df1e2 3252 abort();
33417e70
FB
3253}
3254
0e0df1e2
AK
3255static const MemoryRegionOps error_mem_ops = {
3256 .read = error_mem_read,
3257 .write = error_mem_write,
3258 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3259};
3260
0e0df1e2
AK
3261static const MemoryRegionOps rom_mem_ops = {
3262 .read = error_mem_read,
3263 .write = unassigned_mem_write,
3264 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3265};
3266
0e0df1e2
AK
3267static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3268 uint64_t val, unsigned size)
9fa3e853 3269{
3a7d929e 3270 int dirty_flags;
f7c11b53 3271 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3272 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3273#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3274 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3275 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3276#endif
3a7d929e 3277 }
0e0df1e2
AK
3278 switch (size) {
3279 case 1:
3280 stb_p(qemu_get_ram_ptr(ram_addr), val);
3281 break;
3282 case 2:
3283 stw_p(qemu_get_ram_ptr(ram_addr), val);
3284 break;
3285 case 4:
3286 stl_p(qemu_get_ram_ptr(ram_addr), val);
3287 break;
3288 default:
3289 abort();
3a7d929e 3290 }
f23db169 3291 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3292 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3293 /* we remove the notdirty callback only if the code has been
3294 flushed */
3295 if (dirty_flags == 0xff)
2e70f6ef 3296 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3297}
3298
0e0df1e2
AK
3299static const MemoryRegionOps notdirty_mem_ops = {
3300 .read = error_mem_read,
3301 .write = notdirty_mem_write,
3302 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3303};
3304
0f459d16 3305/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3306static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16 3307{
9349b4f9 3308 CPUArchState *env = cpu_single_env;
06d55cc1
AL
3309 target_ulong pc, cs_base;
3310 TranslationBlock *tb;
0f459d16 3311 target_ulong vaddr;
a1d1bb31 3312 CPUWatchpoint *wp;
06d55cc1 3313 int cpu_flags;
0f459d16 3314
06d55cc1
AL
3315 if (env->watchpoint_hit) {
3316 /* We re-entered the check after replacing the TB. Now raise
3317 * the debug interrupt so that it will trigger after the
3318 * current instruction. */
3319 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3320 return;
3321 }
2e70f6ef 3322 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3323 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3324 if ((vaddr == (wp->vaddr & len_mask) ||
3325 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3326 wp->flags |= BP_WATCHPOINT_HIT;
3327 if (!env->watchpoint_hit) {
3328 env->watchpoint_hit = wp;
3329 tb = tb_find_pc(env->mem_io_pc);
3330 if (!tb) {
3331 cpu_abort(env, "check_watchpoint: could not find TB for "
3332 "pc=%p", (void *)env->mem_io_pc);
3333 }
618ba8e6 3334 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3335 tb_phys_invalidate(tb, -1);
3336 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3337 env->exception_index = EXCP_DEBUG;
488d6577 3338 cpu_loop_exit(env);
6e140f28
AL
3339 } else {
3340 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3341 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3342 cpu_resume_from_signal(env, NULL);
6e140f28 3343 }
06d55cc1 3344 }
6e140f28
AL
3345 } else {
3346 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3347 }
3348 }
3349}
3350
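/* Illustrative sketch (standalone, not part of exec.c): the hit test above
 * treats the access and the watchpoint as naturally aligned power-of-two
 * ranges and checks whether either range contains the start of the other.
 * With a hypothetical 4-byte watchpoint at 0x1000 (len_mask == ~3): */
static int example_watch_hit(unsigned long access_vaddr,
                             unsigned long access_len_mask,
                             unsigned long wp_vaddr,
                             unsigned long wp_len_mask)
{
    return access_vaddr == (wp_vaddr & access_len_mask) ||
           (access_vaddr & wp_len_mask) == wp_vaddr;
}
/* example_watch_hit(0x1002, ~1UL, 0x1000, ~3UL) == 1  (2-byte access inside the word)
 * example_watch_hit(0x1004, ~1UL, 0x1000, ~3UL) == 0  (access just past it misses) */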
6658ffb8
PB
3351/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3352 so these check for a hit then pass through to the normal out-of-line
3353 phys routines. */
1ec9b909
AK
3354static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3355 unsigned size)
6658ffb8 3356{
1ec9b909
AK
3357 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3358 switch (size) {
3359 case 1: return ldub_phys(addr);
3360 case 2: return lduw_phys(addr);
3361 case 4: return ldl_phys(addr);
3362 default: abort();
3363 }
6658ffb8
PB
3364}
3365
1ec9b909
AK
3366static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3367 uint64_t val, unsigned size)
6658ffb8 3368{
1ec9b909
AK
3369 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3370 switch (size) {
67364150
MF
3371 case 1:
3372 stb_phys(addr, val);
3373 break;
3374 case 2:
3375 stw_phys(addr, val);
3376 break;
3377 case 4:
3378 stl_phys(addr, val);
3379 break;
1ec9b909
AK
3380 default: abort();
3381 }
6658ffb8
PB
3382}
3383
1ec9b909
AK
3384static const MemoryRegionOps watch_mem_ops = {
3385 .read = watch_mem_read,
3386 .write = watch_mem_write,
3387 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3388};
6658ffb8 3389
70c68e44
AK
3390static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3391 unsigned len)
db7b5426 3392{
70c68e44 3393 subpage_t *mmio = opaque;
f6405247 3394 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3395 MemoryRegionSection *section;
db7b5426
BS
3396#if defined(DEBUG_SUBPAGE)
3397 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3398 mmio, len, addr, idx);
3399#endif
db7b5426 3400
5312bd8b
AK
3401 section = &phys_sections[mmio->sub_section[idx]];
3402 addr += mmio->base;
3403 addr -= section->offset_within_address_space;
3404 addr += section->offset_within_region;
37ec01d4 3405 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3406}
3407
70c68e44
AK
3408static void subpage_write(void *opaque, target_phys_addr_t addr,
3409 uint64_t value, unsigned len)
db7b5426 3410{
70c68e44 3411 subpage_t *mmio = opaque;
f6405247 3412 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3413 MemoryRegionSection *section;
db7b5426 3414#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3415 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3416 " idx %d value %"PRIx64"\n",
f6405247 3417 __func__, mmio, len, addr, idx, value);
db7b5426 3418#endif
f6405247 3419
5312bd8b
AK
3420 section = &phys_sections[mmio->sub_section[idx]];
3421 addr += mmio->base;
3422 addr -= section->offset_within_address_space;
3423 addr += section->offset_within_region;
37ec01d4 3424 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3425}
3426
70c68e44
AK
3427static const MemoryRegionOps subpage_ops = {
3428 .read = subpage_read,
3429 .write = subpage_write,
3430 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3431};
3432
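/* Illustrative sketch (standalone, hypothetical numbers): the three
 * adjustments in subpage_read()/subpage_write() turn an offset inside the
 * page back into an offset inside the target MemoryRegion.  For a subpage
 * based at 0x10000 whose sub-section starts at address-space offset 0x10800
 * with region offset 0x40, an access at page offset 0x820 lands at region
 * offset 0x60: */
static unsigned long example_subpage_to_region(unsigned long page_offset)
{
    const unsigned long subpage_base          = 0x10000;  /* assumption */
    const unsigned long section_ofs_in_as     = 0x10800;  /* assumption */
    const unsigned long section_ofs_in_region = 0x40;     /* assumption */

    unsigned long addr = page_offset;
    addr += subpage_base;            /* absolute guest-physical address */
    addr -= section_ofs_in_as;       /* offset into the sub-section     */
    addr += section_ofs_in_region;   /* offset into the backing region  */
    return addr;    /* example_subpage_to_region(0x820) == 0x60 */
}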
de712f94
AK
3433static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3434 unsigned size)
56384e8b
AF
3435{
3436 ram_addr_t raddr = addr;
3437 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3438 switch (size) {
3439 case 1: return ldub_p(ptr);
3440 case 2: return lduw_p(ptr);
3441 case 4: return ldl_p(ptr);
3442 default: abort();
3443 }
56384e8b
AF
3444}
3445
de712f94
AK
3446static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3447 uint64_t value, unsigned size)
56384e8b
AF
3448{
3449 ram_addr_t raddr = addr;
3450 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3451 switch (size) {
3452 case 1: return stb_p(ptr, value);
3453 case 2: return stw_p(ptr, value);
3454 case 4: return stl_p(ptr, value);
3455 default: abort();
3456 }
56384e8b
AF
3457}
3458
de712f94
AK
3459static const MemoryRegionOps subpage_ram_ops = {
3460 .read = subpage_ram_read,
3461 .write = subpage_ram_write,
3462 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3463};
3464
c227f099 3465static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3466 uint16_t section)
db7b5426
BS
3467{
3468 int idx, eidx;
3469
3470 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3471 return -1;
3472 idx = SUBPAGE_IDX(start);
3473 eidx = SUBPAGE_IDX(end);
3474#if defined(DEBUG_SUBPAGE)
0bf9e31a 3475 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
 3476 mmio, start, end, idx, eidx, section);
3477#endif
5312bd8b
AK
3478 if (memory_region_is_ram(phys_sections[section].mr)) {
3479 MemoryRegionSection new_section = phys_sections[section];
3480 new_section.mr = &io_mem_subpage_ram;
3481 section = phys_section_add(&new_section);
56384e8b 3482 }
db7b5426 3483 for (; idx <= eidx; idx++) {
5312bd8b 3484 mmio->sub_section[idx] = section;
db7b5426
BS
3485 }
3486
3487 return 0;
3488}
3489
0f0cb164 3490static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3491{
c227f099 3492 subpage_t *mmio;
db7b5426 3493
7267c094 3494 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3495
3496 mmio->base = base;
70c68e44
AK
3497 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3498 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3499 mmio->iomem.subpage = true;
db7b5426 3500#if defined(DEBUG_SUBPAGE)
1eec614b
AL
 3501 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
 3502 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3503#endif
0f0cb164 3504 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3505
3506 return mmio;
3507}
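
/* Illustrative sketch (not part of the original file, deliberately kept out
   of the build): how a caller might carve one guest page into two
   sub-sections.  "subpage_example", "page_base", "sec_lo" and "sec_hi" are
   hypothetical; the section indices are assumed to come from
   phys_section_add(), as in subpage_register() above. */
#if 0
static void subpage_example(target_phys_addr_t page_base,
                            uint16_t sec_lo, uint16_t sec_hi)
{
    subpage_t *sp = subpage_init(page_base);

    /* first half of the page resolves to sec_lo, second half to sec_hi */
    subpage_register(sp, 0, TARGET_PAGE_SIZE / 2 - 1, sec_lo);
    subpage_register(sp, TARGET_PAGE_SIZE / 2, TARGET_PAGE_SIZE - 1, sec_hi);
}
#endif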
3508
5312bd8b
AK
3509static uint16_t dummy_section(MemoryRegion *mr)
3510{
3511 MemoryRegionSection section = {
3512 .mr = mr,
3513 .offset_within_address_space = 0,
3514 .offset_within_region = 0,
3515 .size = UINT64_MAX,
3516 };
3517
3518 return phys_section_add(&section);
3519}
3520
37ec01d4 3521MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3522{
37ec01d4 3523 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3524}
3525
e9179ce1
AK
3526static void io_mem_init(void)
3527{
0e0df1e2 3528 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
0e0df1e2
AK
3529 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3530 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3531 "unassigned", UINT64_MAX);
3532 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3533 "notdirty", UINT64_MAX);
de712f94
AK
3534 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3535 "subpage-ram", UINT64_MAX);
1ec9b909
AK
3536 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3537 "watch", UINT64_MAX);
e9179ce1
AK
3538}
3539
50c1e149
AK
3540static void core_begin(MemoryListener *listener)
3541{
54688b1e 3542 destroy_all_mappings();
5312bd8b 3543 phys_sections_clear();
c19e8800 3544 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3545 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3546 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3547 phys_section_rom = dummy_section(&io_mem_rom);
3548 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3549}
3550
3551static void core_commit(MemoryListener *listener)
3552{
9349b4f9 3553 CPUArchState *env;
117712c3
AK
3554
3555 /* since each CPU stores ram addresses in its TLB cache, we must
3556 reset the modified entries */
3557 /* XXX: slow ! */
3558 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3559 tlb_flush(env, 1);
3560 }
50c1e149
AK
3561}
3562
93632747
AK
3563static void core_region_add(MemoryListener *listener,
3564 MemoryRegionSection *section)
3565{
4855d41a 3566 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3567}
3568
3569static void core_region_del(MemoryListener *listener,
3570 MemoryRegionSection *section)
3571{
93632747
AK
3572}
3573
50c1e149
AK
3574static void core_region_nop(MemoryListener *listener,
3575 MemoryRegionSection *section)
3576{
54688b1e 3577 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3578}
3579
93632747
AK
3580static void core_log_start(MemoryListener *listener,
3581 MemoryRegionSection *section)
3582{
3583}
3584
3585static void core_log_stop(MemoryListener *listener,
3586 MemoryRegionSection *section)
3587{
3588}
3589
3590static void core_log_sync(MemoryListener *listener,
3591 MemoryRegionSection *section)
3592{
3593}
3594
3595static void core_log_global_start(MemoryListener *listener)
3596{
3597 cpu_physical_memory_set_dirty_tracking(1);
3598}
3599
3600static void core_log_global_stop(MemoryListener *listener)
3601{
3602 cpu_physical_memory_set_dirty_tracking(0);
3603}
3604
3605static void core_eventfd_add(MemoryListener *listener,
3606 MemoryRegionSection *section,
3607 bool match_data, uint64_t data, int fd)
3608{
3609}
3610
3611static void core_eventfd_del(MemoryListener *listener,
3612 MemoryRegionSection *section,
3613 bool match_data, uint64_t data, int fd)
3614{
3615}
3616
50c1e149
AK
3617static void io_begin(MemoryListener *listener)
3618{
3619}
3620
3621static void io_commit(MemoryListener *listener)
3622{
3623}
3624
4855d41a
AK
3625static void io_region_add(MemoryListener *listener,
3626 MemoryRegionSection *section)
3627{
a2d33521
AK
3628 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3629
3630 mrio->mr = section->mr;
3631 mrio->offset = section->offset_within_region;
3632 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3633 section->offset_within_address_space, section->size);
a2d33521 3634 ioport_register(&mrio->iorange);
4855d41a
AK
3635}
3636
3637static void io_region_del(MemoryListener *listener,
3638 MemoryRegionSection *section)
3639{
3640 isa_unassign_ioport(section->offset_within_address_space, section->size);
3641}
3642
50c1e149
AK
3643static void io_region_nop(MemoryListener *listener,
3644 MemoryRegionSection *section)
3645{
3646}
3647
4855d41a
AK
3648static void io_log_start(MemoryListener *listener,
3649 MemoryRegionSection *section)
3650{
3651}
3652
3653static void io_log_stop(MemoryListener *listener,
3654 MemoryRegionSection *section)
3655{
3656}
3657
3658static void io_log_sync(MemoryListener *listener,
3659 MemoryRegionSection *section)
3660{
3661}
3662
3663static void io_log_global_start(MemoryListener *listener)
3664{
3665}
3666
3667static void io_log_global_stop(MemoryListener *listener)
3668{
3669}
3670
3671static void io_eventfd_add(MemoryListener *listener,
3672 MemoryRegionSection *section,
3673 bool match_data, uint64_t data, int fd)
3674{
3675}
3676
3677static void io_eventfd_del(MemoryListener *listener,
3678 MemoryRegionSection *section,
3679 bool match_data, uint64_t data, int fd)
3680{
3681}
3682
93632747 3683static MemoryListener core_memory_listener = {
50c1e149
AK
3684 .begin = core_begin,
3685 .commit = core_commit,
93632747
AK
3686 .region_add = core_region_add,
3687 .region_del = core_region_del,
50c1e149 3688 .region_nop = core_region_nop,
93632747
AK
3689 .log_start = core_log_start,
3690 .log_stop = core_log_stop,
3691 .log_sync = core_log_sync,
3692 .log_global_start = core_log_global_start,
3693 .log_global_stop = core_log_global_stop,
3694 .eventfd_add = core_eventfd_add,
3695 .eventfd_del = core_eventfd_del,
3696 .priority = 0,
3697};
3698
4855d41a 3699static MemoryListener io_memory_listener = {
50c1e149
AK
3700 .begin = io_begin,
3701 .commit = io_commit,
4855d41a
AK
3702 .region_add = io_region_add,
3703 .region_del = io_region_del,
50c1e149 3704 .region_nop = io_region_nop,
4855d41a
AK
3705 .log_start = io_log_start,
3706 .log_stop = io_log_stop,
3707 .log_sync = io_log_sync,
3708 .log_global_start = io_log_global_start,
3709 .log_global_stop = io_log_global_stop,
3710 .eventfd_add = io_eventfd_add,
3711 .eventfd_del = io_eventfd_del,
3712 .priority = 0,
3713};
3714
62152b8a
AK
3715static void memory_map_init(void)
3716{
7267c094 3717 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3718 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3719 set_system_memory_map(system_memory);
309cb471 3720
7267c094 3721 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3722 memory_region_init(system_io, "io", 65536);
3723 set_system_io_map(system_io);
93632747 3724
4855d41a
AK
3725 memory_listener_register(&core_memory_listener, system_memory);
3726 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3727}
3728
3729MemoryRegion *get_system_memory(void)
3730{
3731 return system_memory;
3732}
3733
309cb471
AK
3734MemoryRegion *get_system_io(void)
3735{
3736 return system_io;
3737}
3738
e2eef170
PB
3739#endif /* !defined(CONFIG_USER_ONLY) */
3740
13eb76e0
FB
3741/* physical memory access (slow version, mainly for debug) */
3742#if defined(CONFIG_USER_ONLY)
9349b4f9 3743int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
a68fe89c 3744 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3745{
3746 int l, flags;
3747 target_ulong page;
53a5960a 3748 void * p;
13eb76e0
FB
3749
3750 while (len > 0) {
3751 page = addr & TARGET_PAGE_MASK;
3752 l = (page + TARGET_PAGE_SIZE) - addr;
3753 if (l > len)
3754 l = len;
3755 flags = page_get_flags(page);
3756 if (!(flags & PAGE_VALID))
a68fe89c 3757 return -1;
13eb76e0
FB
3758 if (is_write) {
3759 if (!(flags & PAGE_WRITE))
a68fe89c 3760 return -1;
579a97f7 3761 /* XXX: this code should not depend on lock_user */
72fb7daa 3762 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3763 return -1;
72fb7daa
AJ
3764 memcpy(p, buf, l);
3765 unlock_user(p, addr, l);
13eb76e0
FB
3766 } else {
3767 if (!(flags & PAGE_READ))
a68fe89c 3768 return -1;
579a97f7 3769 /* XXX: this code should not depend on lock_user */
72fb7daa 3770 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3771 return -1;
72fb7daa 3772 memcpy(buf, p, l);
5b257578 3773 unlock_user(p, addr, 0);
13eb76e0
FB
3774 }
3775 len -= l;
3776 buf += l;
3777 addr += l;
3778 }
a68fe89c 3779 return 0;
13eb76e0 3780}
8df1cd07 3781
13eb76e0 3782#else
c227f099 3783void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3784 int len, int is_write)
3785{
37ec01d4 3786 int l;
13eb76e0
FB
3787 uint8_t *ptr;
3788 uint32_t val;
c227f099 3789 target_phys_addr_t page;
f3705d53 3790 MemoryRegionSection *section;
3b46e624 3791
13eb76e0
FB
3792 while (len > 0) {
3793 page = addr & TARGET_PAGE_MASK;
3794 l = (page + TARGET_PAGE_SIZE) - addr;
3795 if (l > len)
3796 l = len;
06ef3525 3797 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3798
13eb76e0 3799 if (is_write) {
f3705d53 3800 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3801 target_phys_addr_t addr1;
f3705d53 3802 addr1 = section_addr(section, addr);
6a00d601
FB
3803 /* XXX: could force cpu_single_env to NULL to avoid
3804 potential bugs */
6c2934db 3805 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3806 /* 32 bit write access */
c27004ec 3807 val = ldl_p(buf);
37ec01d4 3808 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3809 l = 4;
6c2934db 3810 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3811 /* 16 bit write access */
c27004ec 3812 val = lduw_p(buf);
37ec01d4 3813 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3814 l = 2;
3815 } else {
1c213d19 3816 /* 8 bit write access */
c27004ec 3817 val = ldub_p(buf);
37ec01d4 3818 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3819 l = 1;
3820 }
f3705d53 3821 } else if (!section->readonly) {
8ca5692d 3822 ram_addr_t addr1;
f3705d53
AK
3823 addr1 = memory_region_get_ram_addr(section->mr)
3824 + section_addr(section, addr);
13eb76e0 3825 /* RAM case */
5579c7f3 3826 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3827 memcpy(ptr, buf, l);
3a7d929e
FB
3828 if (!cpu_physical_memory_is_dirty(addr1)) {
3829 /* invalidate code */
3830 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3831 /* set dirty bit */
f7c11b53
YT
3832 cpu_physical_memory_set_dirty_flags(
3833 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3834 }
050a0ddf 3835 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3836 }
3837 } else {
f3705d53 3838 if (!is_ram_rom_romd(section)) {
f1f6e3b8 3839 target_phys_addr_t addr1;
13eb76e0 3840 /* I/O case */
f3705d53 3841 addr1 = section_addr(section, addr);
6c2934db 3842 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3843 /* 32 bit read access */
37ec01d4 3844 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3845 stl_p(buf, val);
13eb76e0 3846 l = 4;
6c2934db 3847 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3848 /* 16 bit read access */
37ec01d4 3849 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3850 stw_p(buf, val);
13eb76e0
FB
3851 l = 2;
3852 } else {
1c213d19 3853 /* 8 bit read access */
37ec01d4 3854 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3855 stb_p(buf, val);
13eb76e0
FB
3856 l = 1;
3857 }
3858 } else {
3859 /* RAM case */
0a1b357f
AP
3860 ptr = qemu_get_ram_ptr(section->mr->ram_addr
3861 + section_addr(section, addr));
f3705d53 3862 memcpy(buf, ptr, l);
050a0ddf 3863 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3864 }
3865 }
3866 len -= l;
3867 buf += l;
3868 addr += l;
3869 }
3870}
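
/* Illustrative sketch (not part of the original file, kept out of the
   build): device-model style use of cpu_physical_memory_rw(), pulling a
   small descriptor out of guest memory and writing a status byte back.
   "dma_example" and "desc_pa" are hypothetical. */
#if 0
static void dma_example(target_phys_addr_t desc_pa)
{
    uint8_t desc[16];
    uint8_t status = 1;

    /* is_write == 0: copy from guest physical memory into the host buffer */
    cpu_physical_memory_rw(desc_pa, desc, sizeof(desc), 0);

    /* ... interpret the descriptor ... */

    /* is_write == 1: copy the host buffer back into guest physical memory */
    cpu_physical_memory_rw(desc_pa + sizeof(desc), &status, 1, 1);
}
#endif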
8df1cd07 3871
d0ecd2aa 3872/* used for ROM loading: can write in RAM and ROM */
c227f099 3873void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3874 const uint8_t *buf, int len)
3875{
3876 int l;
3877 uint8_t *ptr;
c227f099 3878 target_phys_addr_t page;
f3705d53 3879 MemoryRegionSection *section;
3b46e624 3880
d0ecd2aa
FB
3881 while (len > 0) {
3882 page = addr & TARGET_PAGE_MASK;
3883 l = (page + TARGET_PAGE_SIZE) - addr;
3884 if (l > len)
3885 l = len;
06ef3525 3886 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3887
f3705d53 3888 if (!is_ram_rom_romd(section)) {
d0ecd2aa
FB
3889 /* do nothing */
3890 } else {
3891 unsigned long addr1;
f3705d53
AK
3892 addr1 = memory_region_get_ram_addr(section->mr)
3893 + section_addr(section, addr);
d0ecd2aa 3894 /* ROM/RAM case */
5579c7f3 3895 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3896 memcpy(ptr, buf, l);
050a0ddf 3897 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3898 }
3899 len -= l;
3900 buf += l;
3901 addr += l;
3902 }
3903}
3904
6d16c2f8
AL
3905typedef struct {
3906 void *buffer;
c227f099
AL
3907 target_phys_addr_t addr;
3908 target_phys_addr_t len;
6d16c2f8
AL
3909} BounceBuffer;
3910
3911static BounceBuffer bounce;
3912
ba223c29
AL
3913typedef struct MapClient {
3914 void *opaque;
3915 void (*callback)(void *opaque);
72cf2d4f 3916 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3917} MapClient;
3918
72cf2d4f
BS
3919static QLIST_HEAD(map_client_list, MapClient) map_client_list
3920 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3921
3922void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3923{
7267c094 3924 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3925
3926 client->opaque = opaque;
3927 client->callback = callback;
72cf2d4f 3928 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3929 return client;
3930}
3931
3932void cpu_unregister_map_client(void *_client)
3933{
3934 MapClient *client = (MapClient *)_client;
3935
72cf2d4f 3936 QLIST_REMOVE(client, link);
7267c094 3937 g_free(client);
ba223c29
AL
3938}
3939
3940static void cpu_notify_map_clients(void)
3941{
3942 MapClient *client;
3943
72cf2d4f
BS
3944 while (!QLIST_EMPTY(&map_client_list)) {
3945 client = QLIST_FIRST(&map_client_list);
ba223c29 3946 client->callback(client->opaque);
34d5e948 3947 cpu_unregister_map_client(client);
ba223c29
AL
3948 }
3949}
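
/* Illustrative sketch (not part of the original file, kept out of the
   build): the map-client contract.  A caller whose cpu_physical_memory_map()
   attempt failed registers a callback; cpu_notify_map_clients() runs the
   callback and then unregisters the client itself, so the callback must not
   free its own handle.  "retry_dma", "example_register_client" and
   "my_state" are hypothetical. */
#if 0
static void retry_dma(void *opaque)
{
    /* re-attempt the mapping that previously returned NULL */
}

static void example_register_client(void *my_state)
{
    void *client = cpu_register_map_client(my_state, retry_dma);

    /* if the caller gives up (e.g. on device reset) before being notified: */
    cpu_unregister_map_client(client);
}
#endif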
3950
6d16c2f8
AL
3951/* Map a physical memory region into a host virtual address.
3952 * May map a subset of the requested range, given by and returned in *plen.
3953 * May return NULL if resources needed to perform the mapping are exhausted.
3954 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3955 * Use cpu_register_map_client() to know when retrying the map operation is
3956 * likely to succeed.
6d16c2f8 3957 */
c227f099
AL
3958void *cpu_physical_memory_map(target_phys_addr_t addr,
3959 target_phys_addr_t *plen,
6d16c2f8
AL
3960 int is_write)
3961{
c227f099 3962 target_phys_addr_t len = *plen;
38bee5dc 3963 target_phys_addr_t todo = 0;
6d16c2f8 3964 int l;
c227f099 3965 target_phys_addr_t page;
f3705d53 3966 MemoryRegionSection *section;
f15fbc4b 3967 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3968 ram_addr_t rlen;
3969 void *ret;
6d16c2f8
AL
3970
3971 while (len > 0) {
3972 page = addr & TARGET_PAGE_MASK;
3973 l = (page + TARGET_PAGE_SIZE) - addr;
3974 if (l > len)
3975 l = len;
06ef3525 3976 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 3977
f3705d53 3978 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 3979 if (todo || bounce.buffer) {
6d16c2f8
AL
3980 break;
3981 }
3982 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3983 bounce.addr = addr;
3984 bounce.len = l;
3985 if (!is_write) {
54f7b4a3 3986 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3987 }
38bee5dc
SS
3988
3989 *plen = l;
3990 return bounce.buffer;
6d16c2f8 3991 }
8ab934f9 3992 if (!todo) {
f3705d53
AK
3993 raddr = memory_region_get_ram_addr(section->mr)
3994 + section_addr(section, addr);
8ab934f9 3995 }
6d16c2f8
AL
3996
3997 len -= l;
3998 addr += l;
38bee5dc 3999 todo += l;
6d16c2f8 4000 }
8ab934f9
SS
4001 rlen = todo;
4002 ret = qemu_ram_ptr_length(raddr, &rlen);
4003 *plen = rlen;
4004 return ret;
6d16c2f8
AL
4005}
4006
4007/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4008 * Will also mark the memory as dirty if is_write == 1. access_len gives
4009 * the amount of memory that was actually read or written by the caller.
4010 */
c227f099
AL
4011void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4012 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4013{
4014 if (buffer != bounce.buffer) {
4015 if (is_write) {
e890261f 4016 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4017 while (access_len) {
4018 unsigned l;
4019 l = TARGET_PAGE_SIZE;
4020 if (l > access_len)
4021 l = access_len;
4022 if (!cpu_physical_memory_is_dirty(addr1)) {
4023 /* invalidate code */
4024 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4025 /* set dirty bit */
f7c11b53
YT
4026 cpu_physical_memory_set_dirty_flags(
4027 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4028 }
4029 addr1 += l;
4030 access_len -= l;
4031 }
4032 }
868bb33f 4033 if (xen_enabled()) {
e41d7c69 4034 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4035 }
6d16c2f8
AL
4036 return;
4037 }
4038 if (is_write) {
4039 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4040 }
f8a83245 4041 qemu_vfree(bounce.buffer);
6d16c2f8 4042 bounce.buffer = NULL;
ba223c29 4043 cpu_notify_map_clients();
6d16c2f8 4044}
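
/* Illustrative sketch (not part of the original file, kept out of the
   build): the canonical map / access / unmap pattern for zero-copy access.
   *plen may come back smaller than requested, and NULL means the single
   bounce buffer is already in use.  "zero_copy_read_example" and "src_pa"
   are hypothetical. */
#if 0
static void zero_copy_read_example(target_phys_addr_t src_pa,
                                   uint8_t *dst, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *p = cpu_physical_memory_map(src_pa, &plen, 0 /* read */);

    if (!p) {
        return; /* see cpu_register_map_client() above for the retry path */
    }
    memcpy(dst, p, plen);                     /* only plen bytes are valid */
    cpu_physical_memory_unmap(p, plen, 0, plen);
}
#endif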
d0ecd2aa 4045
8df1cd07 4046/* warning: addr must be aligned */
1e78bcc1
AG
4047static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4048 enum device_endian endian)
8df1cd07 4049{
8df1cd07
FB
4050 uint8_t *ptr;
4051 uint32_t val;
f3705d53 4052 MemoryRegionSection *section;
8df1cd07 4053
06ef3525 4054 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4055
f3705d53 4056 if (!is_ram_rom_romd(section)) {
8df1cd07 4057 /* I/O case */
f3705d53 4058 addr = section_addr(section, addr);
37ec01d4 4059 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
4060#if defined(TARGET_WORDS_BIGENDIAN)
4061 if (endian == DEVICE_LITTLE_ENDIAN) {
4062 val = bswap32(val);
4063 }
4064#else
4065 if (endian == DEVICE_BIG_ENDIAN) {
4066 val = bswap32(val);
4067 }
4068#endif
8df1cd07
FB
4069 } else {
4070 /* RAM case */
f3705d53 4071 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4072 & TARGET_PAGE_MASK)
f3705d53 4073 + section_addr(section, addr));
1e78bcc1
AG
4074 switch (endian) {
4075 case DEVICE_LITTLE_ENDIAN:
4076 val = ldl_le_p(ptr);
4077 break;
4078 case DEVICE_BIG_ENDIAN:
4079 val = ldl_be_p(ptr);
4080 break;
4081 default:
4082 val = ldl_p(ptr);
4083 break;
4084 }
8df1cd07
FB
4085 }
4086 return val;
4087}
4088
1e78bcc1
AG
4089uint32_t ldl_phys(target_phys_addr_t addr)
4090{
4091 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4092}
4093
4094uint32_t ldl_le_phys(target_phys_addr_t addr)
4095{
4096 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4097}
4098
4099uint32_t ldl_be_phys(target_phys_addr_t addr)
4100{
4101 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4102}
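
/* Illustrative sketch (not part of the original file, kept out of the
   build): the plain accessor follows the target's endianness, while the
   _le/_be variants always return host-order values from a fixed-endian
   layout, which is what device models usually want.
   "endian_read_example" and "regs_pa" are hypothetical. */
#if 0
static uint32_t endian_read_example(target_phys_addr_t regs_pa)
{
    uint32_t native = ldl_phys(regs_pa);    /* target byte order */
    uint32_t big    = ldl_be_phys(regs_pa); /* always big endian */

    (void)native;
    (void)big;
    return ldl_le_phys(regs_pa);            /* always little endian */
}
#endif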
4103
84b7b8e7 4104/* warning: addr must be aligned */
1e78bcc1
AG
4105static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4106 enum device_endian endian)
84b7b8e7 4107{
84b7b8e7
FB
4108 uint8_t *ptr;
4109 uint64_t val;
f3705d53 4110 MemoryRegionSection *section;
84b7b8e7 4111
06ef3525 4112 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4113
f3705d53 4114 if (!is_ram_rom_romd(section)) {
84b7b8e7 4115 /* I/O case */
f3705d53 4116 addr = section_addr(section, addr);
1e78bcc1
AG
4117
4118 /* XXX This is broken when device endian != cpu endian.
4119 Fix and add "endian" variable check */
84b7b8e7 4120#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4121 val = io_mem_read(section->mr, addr, 4) << 32;
4122 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 4123#else
37ec01d4
AK
4124 val = io_mem_read(section->mr, addr, 4);
4125 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
4126#endif
4127 } else {
4128 /* RAM case */
f3705d53 4129 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4130 & TARGET_PAGE_MASK)
f3705d53 4131 + section_addr(section, addr));
1e78bcc1
AG
4132 switch (endian) {
4133 case DEVICE_LITTLE_ENDIAN:
4134 val = ldq_le_p(ptr);
4135 break;
4136 case DEVICE_BIG_ENDIAN:
4137 val = ldq_be_p(ptr);
4138 break;
4139 default:
4140 val = ldq_p(ptr);
4141 break;
4142 }
84b7b8e7
FB
4143 }
4144 return val;
4145}
4146
1e78bcc1
AG
4147uint64_t ldq_phys(target_phys_addr_t addr)
4148{
4149 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4150}
4151
4152uint64_t ldq_le_phys(target_phys_addr_t addr)
4153{
4154 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4155}
4156
4157uint64_t ldq_be_phys(target_phys_addr_t addr)
4158{
4159 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4160}
4161
aab33094 4162/* XXX: optimize */
c227f099 4163uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4164{
4165 uint8_t val;
4166 cpu_physical_memory_read(addr, &val, 1);
4167 return val;
4168}
4169
733f0b02 4170/* warning: addr must be aligned */
1e78bcc1
AG
4171static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4172 enum device_endian endian)
aab33094 4173{
733f0b02
MT
4174 uint8_t *ptr;
4175 uint64_t val;
f3705d53 4176 MemoryRegionSection *section;
733f0b02 4177
06ef3525 4178 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4179
f3705d53 4180 if (!is_ram_rom_romd(section)) {
733f0b02 4181 /* I/O case */
f3705d53 4182 addr = section_addr(section, addr);
37ec01d4 4183 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
4184#if defined(TARGET_WORDS_BIGENDIAN)
4185 if (endian == DEVICE_LITTLE_ENDIAN) {
4186 val = bswap16(val);
4187 }
4188#else
4189 if (endian == DEVICE_BIG_ENDIAN) {
4190 val = bswap16(val);
4191 }
4192#endif
733f0b02
MT
4193 } else {
4194 /* RAM case */
f3705d53 4195 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4196 & TARGET_PAGE_MASK)
f3705d53 4197 + section_addr(section, addr));
1e78bcc1
AG
4198 switch (endian) {
4199 case DEVICE_LITTLE_ENDIAN:
4200 val = lduw_le_p(ptr);
4201 break;
4202 case DEVICE_BIG_ENDIAN:
4203 val = lduw_be_p(ptr);
4204 break;
4205 default:
4206 val = lduw_p(ptr);
4207 break;
4208 }
733f0b02
MT
4209 }
4210 return val;
aab33094
FB
4211}
4212
1e78bcc1
AG
4213uint32_t lduw_phys(target_phys_addr_t addr)
4214{
4215 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4216}
4217
4218uint32_t lduw_le_phys(target_phys_addr_t addr)
4219{
4220 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4221}
4222
4223uint32_t lduw_be_phys(target_phys_addr_t addr)
4224{
4225 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4226}
4227
8df1cd07
FB
 4228/* warning: addr must be aligned. The ram page is not marked as dirty
4229 and the code inside is not invalidated. It is useful if the dirty
4230 bits are used to track modified PTEs */
c227f099 4231void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 4232{
8df1cd07 4233 uint8_t *ptr;
f3705d53 4234 MemoryRegionSection *section;
8df1cd07 4235
06ef3525 4236 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4237
f3705d53 4238 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4239 addr = section_addr(section, addr);
f3705d53 4240 if (memory_region_is_ram(section->mr)) {
37ec01d4 4241 section = &phys_sections[phys_section_rom];
06ef3525 4242 }
37ec01d4 4243 io_mem_write(section->mr, addr, val, 4);
8df1cd07 4244 } else {
f3705d53 4245 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 4246 & TARGET_PAGE_MASK)
f3705d53 4247 + section_addr(section, addr);
5579c7f3 4248 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4249 stl_p(ptr, val);
74576198
AL
4250
4251 if (unlikely(in_migration)) {
4252 if (!cpu_physical_memory_is_dirty(addr1)) {
4253 /* invalidate code */
4254 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4255 /* set dirty bit */
f7c11b53
YT
4256 cpu_physical_memory_set_dirty_flags(
4257 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4258 }
4259 }
8df1cd07
FB
4260 }
4261}
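
/* Illustrative sketch (not part of the original file, kept out of the
   build): the kind of caller stl_phys_notdirty() is meant for.  Target MMU
   code updating accessed/dirty bits in a guest page table entry does not
   want that store to dirty the page or invalidate translated code covering
   it.  "pte_update_example", "pte_pa" and the bit value are hypothetical. */
#if 0
static void pte_update_example(target_phys_addr_t pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    pte |= 0x20;                     /* e.g. an "accessed" bit */
    stl_phys_notdirty(pte_pa, pte);  /* no dirty flag, no TB invalidation */
}
#endif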
4262
c227f099 4263void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 4264{
bc98a7ef 4265 uint8_t *ptr;
f3705d53 4266 MemoryRegionSection *section;
bc98a7ef 4267
06ef3525 4268 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4269
f3705d53 4270 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4271 addr = section_addr(section, addr);
f3705d53 4272 if (memory_region_is_ram(section->mr)) {
37ec01d4 4273 section = &phys_sections[phys_section_rom];
06ef3525 4274 }
bc98a7ef 4275#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4276 io_mem_write(section->mr, addr, val >> 32, 4);
4277 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 4278#else
37ec01d4
AK
4279 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4280 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
4281#endif
4282 } else {
f3705d53 4283 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4284 & TARGET_PAGE_MASK)
f3705d53 4285 + section_addr(section, addr));
bc98a7ef
JM
4286 stq_p(ptr, val);
4287 }
4288}
4289
8df1cd07 4290/* warning: addr must be aligned */
1e78bcc1
AG
4291static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4292 enum device_endian endian)
8df1cd07 4293{
8df1cd07 4294 uint8_t *ptr;
f3705d53 4295 MemoryRegionSection *section;
8df1cd07 4296
06ef3525 4297 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4298
f3705d53 4299 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4300 addr = section_addr(section, addr);
f3705d53 4301 if (memory_region_is_ram(section->mr)) {
37ec01d4 4302 section = &phys_sections[phys_section_rom];
06ef3525 4303 }
1e78bcc1
AG
4304#if defined(TARGET_WORDS_BIGENDIAN)
4305 if (endian == DEVICE_LITTLE_ENDIAN) {
4306 val = bswap32(val);
4307 }
4308#else
4309 if (endian == DEVICE_BIG_ENDIAN) {
4310 val = bswap32(val);
4311 }
4312#endif
37ec01d4 4313 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4314 } else {
4315 unsigned long addr1;
f3705d53
AK
4316 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4317 + section_addr(section, addr);
8df1cd07 4318 /* RAM case */
5579c7f3 4319 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4320 switch (endian) {
4321 case DEVICE_LITTLE_ENDIAN:
4322 stl_le_p(ptr, val);
4323 break;
4324 case DEVICE_BIG_ENDIAN:
4325 stl_be_p(ptr, val);
4326 break;
4327 default:
4328 stl_p(ptr, val);
4329 break;
4330 }
3a7d929e
FB
4331 if (!cpu_physical_memory_is_dirty(addr1)) {
4332 /* invalidate code */
4333 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4334 /* set dirty bit */
f7c11b53
YT
4335 cpu_physical_memory_set_dirty_flags(addr1,
4336 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4337 }
8df1cd07
FB
4338 }
4339}
4340
1e78bcc1
AG
4341void stl_phys(target_phys_addr_t addr, uint32_t val)
4342{
4343 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4344}
4345
4346void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4347{
4348 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4349}
4350
4351void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4352{
4353 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4354}
4355
aab33094 4356/* XXX: optimize */
c227f099 4357void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4358{
4359 uint8_t v = val;
4360 cpu_physical_memory_write(addr, &v, 1);
4361}
4362
733f0b02 4363/* warning: addr must be aligned */
1e78bcc1
AG
4364static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4365 enum device_endian endian)
aab33094 4366{
733f0b02 4367 uint8_t *ptr;
f3705d53 4368 MemoryRegionSection *section;
733f0b02 4369
06ef3525 4370 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4371
f3705d53 4372 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4373 addr = section_addr(section, addr);
f3705d53 4374 if (memory_region_is_ram(section->mr)) {
37ec01d4 4375 section = &phys_sections[phys_section_rom];
06ef3525 4376 }
1e78bcc1
AG
4377#if defined(TARGET_WORDS_BIGENDIAN)
4378 if (endian == DEVICE_LITTLE_ENDIAN) {
4379 val = bswap16(val);
4380 }
4381#else
4382 if (endian == DEVICE_BIG_ENDIAN) {
4383 val = bswap16(val);
4384 }
4385#endif
37ec01d4 4386 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4387 } else {
4388 unsigned long addr1;
f3705d53
AK
4389 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4390 + section_addr(section, addr);
733f0b02
MT
4391 /* RAM case */
4392 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4393 switch (endian) {
4394 case DEVICE_LITTLE_ENDIAN:
4395 stw_le_p(ptr, val);
4396 break;
4397 case DEVICE_BIG_ENDIAN:
4398 stw_be_p(ptr, val);
4399 break;
4400 default:
4401 stw_p(ptr, val);
4402 break;
4403 }
733f0b02
MT
4404 if (!cpu_physical_memory_is_dirty(addr1)) {
4405 /* invalidate code */
4406 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4407 /* set dirty bit */
4408 cpu_physical_memory_set_dirty_flags(addr1,
4409 (0xff & ~CODE_DIRTY_FLAG));
4410 }
4411 }
aab33094
FB
4412}
4413
1e78bcc1
AG
4414void stw_phys(target_phys_addr_t addr, uint32_t val)
4415{
4416 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4417}
4418
4419void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4420{
4421 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4422}
4423
4424void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4425{
4426 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4427}
4428
aab33094 4429/* XXX: optimize */
c227f099 4430void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4431{
4432 val = tswap64(val);
71d2b725 4433 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4434}
4435
1e78bcc1
AG
4436void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4437{
4438 val = cpu_to_le64(val);
4439 cpu_physical_memory_write(addr, &val, 8);
4440}
4441
4442void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4443{
4444 val = cpu_to_be64(val);
4445 cpu_physical_memory_write(addr, &val, 8);
4446}
4447
5e2972fd 4448/* virtual memory access for debug (includes writing to ROM) */
9349b4f9 4449int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
b448f2f3 4450 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4451{
4452 int l;
c227f099 4453 target_phys_addr_t phys_addr;
9b3c35e0 4454 target_ulong page;
13eb76e0
FB
4455
4456 while (len > 0) {
4457 page = addr & TARGET_PAGE_MASK;
4458 phys_addr = cpu_get_phys_page_debug(env, page);
4459 /* if no physical page mapped, return an error */
4460 if (phys_addr == -1)
4461 return -1;
4462 l = (page + TARGET_PAGE_SIZE) - addr;
4463 if (l > len)
4464 l = len;
5e2972fd 4465 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4466 if (is_write)
4467 cpu_physical_memory_write_rom(phys_addr, buf, l);
4468 else
5e2972fd 4469 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4470 len -= l;
4471 buf += l;
4472 addr += l;
4473 }
4474 return 0;
4475}
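
/* Illustrative sketch (not part of the original file, kept out of the
   build): debugger-style access.  cpu_memory_rw_debug() takes a guest
   *virtual* address, resolves it via cpu_get_phys_page_debug() and, for
   writes, uses the ROM-capable path, which is what a gdb stub needs.
   "gdb_peek_example" is hypothetical. */
#if 0
static int gdb_peek_example(CPUArchState *env, target_ulong vaddr,
                            uint8_t *out, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif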
a68fe89c 4476#endif
13eb76e0 4477
2e70f6ef
PB
4478/* in deterministic execution mode, instructions doing device I/Os
4479 must be at the end of the TB */
9349b4f9 4480void cpu_io_recompile(CPUArchState *env, void *retaddr)
2e70f6ef
PB
4481{
4482 TranslationBlock *tb;
4483 uint32_t n, cflags;
4484 target_ulong pc, cs_base;
4485 uint64_t flags;
4486
4487 tb = tb_find_pc((unsigned long)retaddr);
4488 if (!tb) {
4489 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4490 retaddr);
4491 }
4492 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4493 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4494 /* Calculate how many instructions had been executed before the fault
bf20dc07 4495 occurred. */
2e70f6ef
PB
4496 n = n - env->icount_decr.u16.low;
4497 /* Generate a new TB ending on the I/O insn. */
4498 n++;
4499 /* On MIPS and SH, delay slot instructions can only be restarted if
4500 they were already the first instruction in the TB. If this is not
bf20dc07 4501 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4502 branch. */
4503#if defined(TARGET_MIPS)
4504 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4505 env->active_tc.PC -= 4;
4506 env->icount_decr.u16.low++;
4507 env->hflags &= ~MIPS_HFLAG_BMASK;
4508 }
4509#elif defined(TARGET_SH4)
4510 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4511 && n > 1) {
4512 env->pc -= 2;
4513 env->icount_decr.u16.low++;
4514 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4515 }
4516#endif
4517 /* This should never happen. */
4518 if (n > CF_COUNT_MASK)
4519 cpu_abort(env, "TB too big during recompile");
4520
4521 cflags = n | CF_LAST_IO;
4522 pc = tb->pc;
4523 cs_base = tb->cs_base;
4524 flags = tb->flags;
4525 tb_phys_invalidate(tb, -1);
4526 /* FIXME: In theory this could raise an exception. In practice
4527 we have already translated the block once so it's probably ok. */
4528 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4529 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4530 the first in the TB) then we end up generating a whole new TB and
4531 repeating the fault, which is horribly inefficient.
4532 Better would be to execute just this insn uncached, or generate a
4533 second new TB. */
4534 cpu_resume_from_signal(env, NULL);
4535}
4536
b3755a91
PB
4537#if !defined(CONFIG_USER_ONLY)
4538
055403b2 4539void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4540{
4541 int i, target_code_size, max_target_code_size;
4542 int direct_jmp_count, direct_jmp2_count, cross_page;
4543 TranslationBlock *tb;
3b46e624 4544
e3db7226
FB
4545 target_code_size = 0;
4546 max_target_code_size = 0;
4547 cross_page = 0;
4548 direct_jmp_count = 0;
4549 direct_jmp2_count = 0;
4550 for(i = 0; i < nb_tbs; i++) {
4551 tb = &tbs[i];
4552 target_code_size += tb->size;
4553 if (tb->size > max_target_code_size)
4554 max_target_code_size = tb->size;
4555 if (tb->page_addr[1] != -1)
4556 cross_page++;
4557 if (tb->tb_next_offset[0] != 0xffff) {
4558 direct_jmp_count++;
4559 if (tb->tb_next_offset[1] != 0xffff) {
4560 direct_jmp2_count++;
4561 }
4562 }
4563 }
4564 /* XXX: avoid using doubles ? */
57fec1fe 4565 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4566 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4567 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4568 cpu_fprintf(f, "TB count %d/%d\n",
4569 nb_tbs, code_gen_max_blocks);
5fafdf24 4570 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4571 nb_tbs ? target_code_size / nb_tbs : 0,
4572 max_target_code_size);
055403b2 4573 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4574 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4575 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4576 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4577 cross_page,
e3db7226
FB
4578 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4579 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4580 direct_jmp_count,
e3db7226
FB
4581 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4582 direct_jmp2_count,
4583 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4584 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4585 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4586 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4587 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4588 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4589}
4590
d39e8222
AK
4591/* NOTE: this function can trigger an exception */
4592/* NOTE2: the returned address is not exactly the physical address: it
4593 is the offset relative to phys_ram_base */
9349b4f9 4594tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
d39e8222
AK
4595{
4596 int mmu_idx, page_index, pd;
4597 void *p;
37ec01d4 4598 MemoryRegion *mr;
d39e8222
AK
4599
4600 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4601 mmu_idx = cpu_mmu_index(env1);
4602 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4603 (addr & TARGET_PAGE_MASK))) {
e141ab52
BS
4604#ifdef CONFIG_TCG_PASS_AREG0
4605 cpu_ldub_code(env1, addr);
4606#else
d39e8222 4607 ldub_code(addr);
e141ab52 4608#endif
d39e8222 4609 }
ce5d64c2 4610 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
37ec01d4
AK
4611 mr = iotlb_to_region(pd);
4612 if (mr != &io_mem_ram && mr != &io_mem_rom
32b08980
AK
4613 && mr != &io_mem_notdirty && !mr->rom_device
4614 && mr != &io_mem_watch) {
d39e8222
AK
4615#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4616 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4617#else
4618 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4619#endif
4620 }
4621 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4622 return qemu_ram_addr_from_host_nofail(p);
4623}
4624
82afa586
BH
4625/*
4626 * A helper function for the _utterly broken_ virtio device model to find out if
4627 * it's running on a big endian machine. Don't do this at home kids!
4628 */
4629bool virtio_is_big_endian(void);
4630bool virtio_is_big_endian(void)
4631{
4632#if defined(TARGET_WORDS_BIGENDIAN)
4633 return true;
4634#else
4635 return false;
4636#endif
4637}
4638
61382a50 4639#define MMUSUFFIX _cmmu
3917149d 4640#undef GETPC
61382a50
FB
4641#define GETPC() NULL
4642#define env cpu_single_env
b769d8fe 4643#define SOFTMMU_CODE_ACCESS
61382a50
FB
4644
4645#define SHIFT 0
4646#include "softmmu_template.h"
4647
4648#define SHIFT 1
4649#include "softmmu_template.h"
4650
4651#define SHIFT 2
4652#include "softmmu_template.h"
4653
4654#define SHIFT 3
4655#include "softmmu_template.h"
4656
4657#undef env
4658
4659#endif