]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
memory: dispatch directly via MemoryRegion
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
67d95c15
AK
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
67
68/* make various TB consistency checks */
5fafdf24
TS
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
99773bd4
PB
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
9fa3e853
FB
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
141ac468
BS
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
f8e2af11
SW
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
d03d860b
BS
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
0e0df1e2 121MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
de712f94 122static MemoryRegion io_mem_subpage_ram;
0e0df1e2 123
e2eef170 124#endif
9fa3e853 125
6a00d601
FB
126CPUState *first_cpu;
127/* current CPU in the current thread. It is only valid inside
128 cpu_exec() */
b3c4bbe5 129DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 130/* 0 = Do not count executed instructions.
bf20dc07 131 1 = Precise instruction counting.
2e70f6ef
PB
132 2 = Adaptive rate instruction counting. */
133int use_icount = 0;
6a00d601 134
54936004 135typedef struct PageDesc {
92e873b9 136 /* list of TBs intersecting this ram page */
fd6ce8f6 137 TranslationBlock *first_tb;
9fa3e853
FB
138 /* in order to optimize self modifying code, we count the number
139 of lookups we do to a given page to use a bitmap */
140 unsigned int code_write_count;
141 uint8_t *code_bitmap;
142#if defined(CONFIG_USER_ONLY)
143 unsigned long flags;
144#endif
54936004
FB
145} PageDesc;
146
41c1b1c9 147/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
148 while in user mode we want it to be based on virtual addresses. */
149#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
150#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
151# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 154#endif
bedb69ea 155#else
5cd2c5b6 156# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 157#endif
54936004 158
5cd2c5b6
RH
159/* Size of the L2 (and L3, etc) page tables. */
160#define L2_BITS 10
54936004
FB
161#define L2_SIZE (1 << L2_BITS)
162
3eef53df
AK
163#define P_L2_LEVELS \
164 (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
165
5cd2c5b6 166/* The bits remaining after N lower levels of page tables. */
5cd2c5b6
RH
167#define V_L1_BITS_REM \
168 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
169
5cd2c5b6
RH
170#if V_L1_BITS_REM < 4
171#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172#else
173#define V_L1_BITS V_L1_BITS_REM
174#endif
175
5cd2c5b6
RH
176#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
177
5cd2c5b6
RH
178#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
179
83fb7adf 180unsigned long qemu_real_host_page_size;
83fb7adf
FB
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
54936004 183
5cd2c5b6
RH
184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
54936004 187
e2eef170 188#if !defined(CONFIG_USER_ONLY)
4346ae3e
AK
189typedef struct PhysPageEntry PhysPageEntry;
190
5312bd8b
AK
191static MemoryRegionSection *phys_sections;
192static unsigned phys_sections_nb, phys_sections_nb_alloc;
193static uint16_t phys_section_unassigned;
aa102231
AK
194static uint16_t phys_section_notdirty;
195static uint16_t phys_section_rom;
196static uint16_t phys_section_watch;
5312bd8b 197
4346ae3e 198struct PhysPageEntry {
07f07b31
AK
199 uint16_t is_leaf : 1;
200 /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
201 uint16_t ptr : 15;
4346ae3e
AK
202};
203
d6f2ea22
AK
204/* Simple allocator for PhysPageEntry nodes */
205static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
206static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;
207
07f07b31 208#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
d6f2ea22 209
5cd2c5b6 210/* This is a multi-level map on the physical address space.
06ef3525 211 The bottom level has pointers to MemoryRegionSections. */
07f07b31 212static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
6d9a1304 213
e2eef170 214static void io_mem_init(void);
62152b8a 215static void memory_map_init(void);
e2eef170 216
33417e70 217/* io memory support */
a621f38d 218MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
511d2b14 219static char io_mem_used[IO_MEM_NB_ENTRIES];
1ec9b909 220static MemoryRegion io_mem_watch;
6658ffb8 221#endif
33417e70 222
34865134 223/* log support */
1e8b27ca
JR
224#ifdef WIN32
225static const char *logfilename = "qemu.log";
226#else
d9b630fd 227static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 228#endif
34865134
FB
229FILE *logfile;
230int loglevel;
e735b91c 231static int log_append = 0;
34865134 232
e3db7226 233/* statistics */
b3755a91 234#if !defined(CONFIG_USER_ONLY)
e3db7226 235static int tlb_flush_count;
b3755a91 236#endif
e3db7226
FB
237static int tb_flush_count;
238static int tb_phys_invalidate_count;
239
7cb69cae
FB
240#ifdef _WIN32
241static void map_exec(void *addr, long size)
242{
243 DWORD old_protect;
244 VirtualProtect(addr, size,
245 PAGE_EXECUTE_READWRITE, &old_protect);
246
247}
248#else
249static void map_exec(void *addr, long size)
250{
4369415f 251 unsigned long start, end, page_size;
7cb69cae 252
4369415f 253 page_size = getpagesize();
7cb69cae 254 start = (unsigned long)addr;
4369415f 255 start &= ~(page_size - 1);
7cb69cae
FB
256
257 end = (unsigned long)addr + size;
4369415f
FB
258 end += page_size - 1;
259 end &= ~(page_size - 1);
7cb69cae
FB
260
261 mprotect((void *)start, end - start,
262 PROT_READ | PROT_WRITE | PROT_EXEC);
263}
264#endif
265
b346ff46 266static void page_init(void)
54936004 267{
83fb7adf 268 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 269 TARGET_PAGE_SIZE */
c2b48b69
AL
270#ifdef _WIN32
271 {
272 SYSTEM_INFO system_info;
273
274 GetSystemInfo(&system_info);
275 qemu_real_host_page_size = system_info.dwPageSize;
276 }
277#else
278 qemu_real_host_page_size = getpagesize();
279#endif
83fb7adf
FB
280 if (qemu_host_page_size == 0)
281 qemu_host_page_size = qemu_real_host_page_size;
282 if (qemu_host_page_size < TARGET_PAGE_SIZE)
283 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 284 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 285
2e9a5713 286#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 287 {
f01576f1
JL
288#ifdef HAVE_KINFO_GETVMMAP
289 struct kinfo_vmentry *freep;
290 int i, cnt;
291
292 freep = kinfo_getvmmap(getpid(), &cnt);
293 if (freep) {
294 mmap_lock();
295 for (i = 0; i < cnt; i++) {
296 unsigned long startaddr, endaddr;
297
298 startaddr = freep[i].kve_start;
299 endaddr = freep[i].kve_end;
300 if (h2g_valid(startaddr)) {
301 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
302
303 if (h2g_valid(endaddr)) {
304 endaddr = h2g(endaddr);
fd436907 305 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
306 } else {
307#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
308 endaddr = ~0ul;
fd436907 309 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
310#endif
311 }
312 }
313 }
314 free(freep);
315 mmap_unlock();
316 }
317#else
50a9569b 318 FILE *f;
50a9569b 319
0776590d 320 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 321
fd436907 322 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 323 if (f) {
5cd2c5b6
RH
324 mmap_lock();
325
50a9569b 326 do {
5cd2c5b6
RH
327 unsigned long startaddr, endaddr;
328 int n;
329
330 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
331
332 if (n == 2 && h2g_valid(startaddr)) {
333 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
334
335 if (h2g_valid(endaddr)) {
336 endaddr = h2g(endaddr);
337 } else {
338 endaddr = ~0ul;
339 }
340 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
341 }
342 } while (!feof(f));
5cd2c5b6 343
50a9569b 344 fclose(f);
5cd2c5b6 345 mmap_unlock();
50a9569b 346 }
f01576f1 347#endif
50a9569b
AZ
348 }
349#endif
54936004
FB
350}
351
41c1b1c9 352static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 353{
41c1b1c9
PB
354 PageDesc *pd;
355 void **lp;
356 int i;
357
5cd2c5b6 358#if defined(CONFIG_USER_ONLY)
7267c094 359 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
360# define ALLOC(P, SIZE) \
361 do { \
362 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
363 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
364 } while (0)
365#else
366# define ALLOC(P, SIZE) \
7267c094 367 do { P = g_malloc0(SIZE); } while (0)
17e2377a 368#endif
434929bf 369
5cd2c5b6
RH
370 /* Level 1. Always allocated. */
371 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
372
373 /* Level 2..N-1. */
374 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
375 void **p = *lp;
376
377 if (p == NULL) {
378 if (!alloc) {
379 return NULL;
380 }
381 ALLOC(p, sizeof(void *) * L2_SIZE);
382 *lp = p;
17e2377a 383 }
5cd2c5b6
RH
384
385 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
386 }
387
388 pd = *lp;
389 if (pd == NULL) {
390 if (!alloc) {
391 return NULL;
392 }
393 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
394 *lp = pd;
54936004 395 }
5cd2c5b6
RH
396
397#undef ALLOC
5cd2c5b6
RH
398
399 return pd + (index & (L2_SIZE - 1));
54936004
FB
400}
401
41c1b1c9 402static inline PageDesc *page_find(tb_page_addr_t index)
54936004 403{
5cd2c5b6 404 return page_find_alloc(index, 0);
fd6ce8f6
FB
405}
406
6d9a1304 407#if !defined(CONFIG_USER_ONLY)
d6f2ea22 408
f7bf5461 409static void phys_map_node_reserve(unsigned nodes)
d6f2ea22 410{
f7bf5461 411 if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
d6f2ea22
AK
412 typedef PhysPageEntry Node[L2_SIZE];
413 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
f7bf5461
AK
414 phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
415 phys_map_nodes_nb + nodes);
d6f2ea22
AK
416 phys_map_nodes = g_renew(Node, phys_map_nodes,
417 phys_map_nodes_nb_alloc);
418 }
f7bf5461
AK
419}
420
421static uint16_t phys_map_node_alloc(void)
422{
423 unsigned i;
424 uint16_t ret;
425
426 ret = phys_map_nodes_nb++;
427 assert(ret != PHYS_MAP_NODE_NIL);
428 assert(ret != phys_map_nodes_nb_alloc);
d6f2ea22 429 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 430 phys_map_nodes[ret][i].is_leaf = 0;
c19e8800 431 phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
d6f2ea22 432 }
f7bf5461 433 return ret;
d6f2ea22
AK
434}
435
436static void phys_map_nodes_reset(void)
437{
438 phys_map_nodes_nb = 0;
439}
440
92e873b9 441
2999097b
AK
442static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
443 target_phys_addr_t *nb, uint16_t leaf,
444 int level)
f7bf5461
AK
445{
446 PhysPageEntry *p;
447 int i;
07f07b31 448 target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);
108c49b8 449
07f07b31 450 if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
c19e8800
AK
451 lp->ptr = phys_map_node_alloc();
452 p = phys_map_nodes[lp->ptr];
f7bf5461
AK
453 if (level == 0) {
454 for (i = 0; i < L2_SIZE; i++) {
07f07b31 455 p[i].is_leaf = 1;
c19e8800 456 p[i].ptr = phys_section_unassigned;
4346ae3e 457 }
67c4d23c 458 }
f7bf5461 459 } else {
c19e8800 460 p = phys_map_nodes[lp->ptr];
92e873b9 461 }
2999097b 462 lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];
f7bf5461 463
2999097b 464 while (*nb && lp < &p[L2_SIZE]) {
07f07b31
AK
465 if ((*index & (step - 1)) == 0 && *nb >= step) {
466 lp->is_leaf = true;
c19e8800 467 lp->ptr = leaf;
07f07b31
AK
468 *index += step;
469 *nb -= step;
2999097b
AK
470 } else {
471 phys_page_set_level(lp, index, nb, leaf, level - 1);
472 }
473 ++lp;
f7bf5461
AK
474 }
475}
476
2999097b
AK
477static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
478 uint16_t leaf)
f7bf5461 479{
2999097b 480 /* Wildly overreserve - it doesn't matter much. */
07f07b31 481 phys_map_node_reserve(3 * P_L2_LEVELS);
5cd2c5b6 482
2999097b 483 phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
92e873b9
FB
484}
485
f3705d53 486static MemoryRegionSection *phys_page_find(target_phys_addr_t index)
92e873b9 487{
31ab2b4a
AK
488 PhysPageEntry lp = phys_map;
489 PhysPageEntry *p;
490 int i;
31ab2b4a 491 uint16_t s_index = phys_section_unassigned;
f1f6e3b8 492
07f07b31 493 for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
c19e8800 494 if (lp.ptr == PHYS_MAP_NODE_NIL) {
31ab2b4a
AK
495 goto not_found;
496 }
c19e8800 497 p = phys_map_nodes[lp.ptr];
31ab2b4a 498 lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
5312bd8b 499 }
31ab2b4a 500
c19e8800 501 s_index = lp.ptr;
31ab2b4a 502not_found:
f3705d53
AK
503 return &phys_sections[s_index];
504}
505
506static target_phys_addr_t section_addr(MemoryRegionSection *section,
507 target_phys_addr_t addr)
508{
509 addr -= section->offset_within_address_space;
510 addr += section->offset_within_region;
511 return addr;
92e873b9
FB
512}
513
c227f099
AL
514static void tlb_protect_code(ram_addr_t ram_addr);
515static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 516 target_ulong vaddr);
c8a706fe
PB
517#define mmap_lock() do { } while(0)
518#define mmap_unlock() do { } while(0)
9fa3e853 519#endif
fd6ce8f6 520
4369415f
FB
521#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
522
523#if defined(CONFIG_USER_ONLY)
ccbb4d44 524/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
525 user mode. It will change when a dedicated libc will be used */
526#define USE_STATIC_CODE_GEN_BUFFER
527#endif
528
529#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
530static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
531 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
532#endif
533
8fcd3692 534static void code_gen_alloc(unsigned long tb_size)
26a5f13b 535{
4369415f
FB
536#ifdef USE_STATIC_CODE_GEN_BUFFER
537 code_gen_buffer = static_code_gen_buffer;
538 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
539 map_exec(code_gen_buffer, code_gen_buffer_size);
540#else
26a5f13b
FB
541 code_gen_buffer_size = tb_size;
542 if (code_gen_buffer_size == 0) {
4369415f 543#if defined(CONFIG_USER_ONLY)
4369415f
FB
544 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
545#else
ccbb4d44 546 /* XXX: needs adjustments */
94a6b54f 547 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 548#endif
26a5f13b
FB
549 }
550 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
551 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
552 /* The code gen buffer location may have constraints depending on
553 the host cpu and OS */
554#if defined(__linux__)
555 {
556 int flags;
141ac468
BS
557 void *start = NULL;
558
26a5f13b
FB
559 flags = MAP_PRIVATE | MAP_ANONYMOUS;
560#if defined(__x86_64__)
561 flags |= MAP_32BIT;
562 /* Cannot map more than that */
563 if (code_gen_buffer_size > (800 * 1024 * 1024))
564 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
565#elif defined(__sparc_v9__)
566 // Map the buffer below 2G, so we can use direct calls and branches
567 flags |= MAP_FIXED;
568 start = (void *) 0x60000000UL;
569 if (code_gen_buffer_size > (512 * 1024 * 1024))
570 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 571#elif defined(__arm__)
5c84bd90 572 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
573 if (code_gen_buffer_size > 16 * 1024 * 1024)
574 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
575#elif defined(__s390x__)
576 /* Map the buffer so that we can use direct calls and branches. */
577 /* We have a +- 4GB range on the branches; leave some slop. */
578 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
579 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
580 }
581 start = (void *)0x90000000UL;
26a5f13b 582#endif
141ac468
BS
583 code_gen_buffer = mmap(start, code_gen_buffer_size,
584 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
585 flags, -1, 0);
586 if (code_gen_buffer == MAP_FAILED) {
587 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
588 exit(1);
589 }
590 }
cbb608a5 591#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
592 || defined(__DragonFly__) || defined(__OpenBSD__) \
593 || defined(__NetBSD__)
06e67a82
AL
594 {
595 int flags;
596 void *addr = NULL;
597 flags = MAP_PRIVATE | MAP_ANONYMOUS;
598#if defined(__x86_64__)
599 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
600 * 0x40000000 is free */
601 flags |= MAP_FIXED;
602 addr = (void *)0x40000000;
603 /* Cannot map more than that */
604 if (code_gen_buffer_size > (800 * 1024 * 1024))
605 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
606#elif defined(__sparc_v9__)
607 // Map the buffer below 2G, so we can use direct calls and branches
608 flags |= MAP_FIXED;
609 addr = (void *) 0x60000000UL;
610 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
611 code_gen_buffer_size = (512 * 1024 * 1024);
612 }
06e67a82
AL
613#endif
614 code_gen_buffer = mmap(addr, code_gen_buffer_size,
615 PROT_WRITE | PROT_READ | PROT_EXEC,
616 flags, -1, 0);
617 if (code_gen_buffer == MAP_FAILED) {
618 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
619 exit(1);
620 }
621 }
26a5f13b 622#else
7267c094 623 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
624 map_exec(code_gen_buffer, code_gen_buffer_size);
625#endif
4369415f 626#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 627 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
628 code_gen_buffer_max_size = code_gen_buffer_size -
629 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 630 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 631 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
632}
633
634/* Must be called before using the QEMU cpus. 'tb_size' is the size
635 (in bytes) allocated to the translation buffer. Zero means default
636 size. */
d5ab9713 637void tcg_exec_init(unsigned long tb_size)
26a5f13b 638{
26a5f13b
FB
639 cpu_gen_init();
640 code_gen_alloc(tb_size);
641 code_gen_ptr = code_gen_buffer;
4369415f 642 page_init();
9002ec79
RH
643#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
644 /* There's no guest base to take into account, so go ahead and
645 initialize the prologue now. */
646 tcg_prologue_init(&tcg_ctx);
647#endif
26a5f13b
FB
648}
649
d5ab9713
JK
650bool tcg_enabled(void)
651{
652 return code_gen_buffer != NULL;
653}
654
655void cpu_exec_init_all(void)
656{
657#if !defined(CONFIG_USER_ONLY)
658 memory_map_init();
659 io_mem_init();
660#endif
661}
662
9656f324
PB
663#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
664
e59fb374 665static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
666{
667 CPUState *env = opaque;
9656f324 668
3098dba0
AJ
669 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
670 version_id is increased. */
671 env->interrupt_request &= ~0x01;
9656f324
PB
672 tlb_flush(env, 1);
673
674 return 0;
675}
e7f4eff7
JQ
676
677static const VMStateDescription vmstate_cpu_common = {
678 .name = "cpu_common",
679 .version_id = 1,
680 .minimum_version_id = 1,
681 .minimum_version_id_old = 1,
e7f4eff7
JQ
682 .post_load = cpu_common_post_load,
683 .fields = (VMStateField []) {
684 VMSTATE_UINT32(halted, CPUState),
685 VMSTATE_UINT32(interrupt_request, CPUState),
686 VMSTATE_END_OF_LIST()
687 }
688};
9656f324
PB
689#endif
690
950f1472
GC
691CPUState *qemu_get_cpu(int cpu)
692{
693 CPUState *env = first_cpu;
694
695 while (env) {
696 if (env->cpu_index == cpu)
697 break;
698 env = env->next_cpu;
699 }
700
701 return env;
702}
703
6a00d601 704void cpu_exec_init(CPUState *env)
fd6ce8f6 705{
6a00d601
FB
706 CPUState **penv;
707 int cpu_index;
708
c2764719
PB
709#if defined(CONFIG_USER_ONLY)
710 cpu_list_lock();
711#endif
6a00d601
FB
712 env->next_cpu = NULL;
713 penv = &first_cpu;
714 cpu_index = 0;
715 while (*penv != NULL) {
1e9fa730 716 penv = &(*penv)->next_cpu;
6a00d601
FB
717 cpu_index++;
718 }
719 env->cpu_index = cpu_index;
268a362c 720 env->numa_node = 0;
72cf2d4f
BS
721 QTAILQ_INIT(&env->breakpoints);
722 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
723#ifndef CONFIG_USER_ONLY
724 env->thread_id = qemu_get_thread_id();
725#endif
6a00d601 726 *penv = env;
c2764719
PB
727#if defined(CONFIG_USER_ONLY)
728 cpu_list_unlock();
729#endif
b3c7724c 730#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
731 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
732 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
733 cpu_save, cpu_load, env);
734#endif
fd6ce8f6
FB
735}
736
d1a1eb74
TG
737/* Allocate a new translation block. Flush the translation buffer if
738 too many translation blocks or too much generated code. */
739static TranslationBlock *tb_alloc(target_ulong pc)
740{
741 TranslationBlock *tb;
742
743 if (nb_tbs >= code_gen_max_blocks ||
744 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
745 return NULL;
746 tb = &tbs[nb_tbs++];
747 tb->pc = pc;
748 tb->cflags = 0;
749 return tb;
750}
751
752void tb_free(TranslationBlock *tb)
753{
754 /* In practice this is mostly used for single use temporary TB
755 Ignore the hard cases and just back up if this TB happens to
756 be the last one generated. */
757 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
758 code_gen_ptr = tb->tc_ptr;
759 nb_tbs--;
760 }
761}
762
9fa3e853
FB
763static inline void invalidate_page_bitmap(PageDesc *p)
764{
765 if (p->code_bitmap) {
7267c094 766 g_free(p->code_bitmap);
9fa3e853
FB
767 p->code_bitmap = NULL;
768 }
769 p->code_write_count = 0;
770}
771
5cd2c5b6
RH
772/* Set to NULL all the 'first_tb' fields in all PageDescs. */
773
774static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 775{
5cd2c5b6 776 int i;
fd6ce8f6 777
5cd2c5b6
RH
778 if (*lp == NULL) {
779 return;
780 }
781 if (level == 0) {
782 PageDesc *pd = *lp;
7296abac 783 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
784 pd[i].first_tb = NULL;
785 invalidate_page_bitmap(pd + i);
fd6ce8f6 786 }
5cd2c5b6
RH
787 } else {
788 void **pp = *lp;
7296abac 789 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
790 page_flush_tb_1 (level - 1, pp + i);
791 }
792 }
793}
794
795static void page_flush_tb(void)
796{
797 int i;
798 for (i = 0; i < V_L1_SIZE; i++) {
799 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
800 }
801}
802
803/* flush all the translation blocks */
d4e8164f 804/* XXX: tb_flush is currently not thread safe */
6a00d601 805void tb_flush(CPUState *env1)
fd6ce8f6 806{
6a00d601 807 CPUState *env;
0124311e 808#if defined(DEBUG_FLUSH)
ab3d1727
BS
809 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
810 (unsigned long)(code_gen_ptr - code_gen_buffer),
811 nb_tbs, nb_tbs > 0 ?
812 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 813#endif
26a5f13b 814 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
815 cpu_abort(env1, "Internal error: code buffer overflow\n");
816
fd6ce8f6 817 nb_tbs = 0;
3b46e624 818
6a00d601
FB
819 for(env = first_cpu; env != NULL; env = env->next_cpu) {
820 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
821 }
9fa3e853 822
8a8a608f 823 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 824 page_flush_tb();
9fa3e853 825
fd6ce8f6 826 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
827 /* XXX: flush processor icache at this point if cache flush is
828 expensive */
e3db7226 829 tb_flush_count++;
fd6ce8f6
FB
830}
831
832#ifdef DEBUG_TB_CHECK
833
bc98a7ef 834static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
835{
836 TranslationBlock *tb;
837 int i;
838 address &= TARGET_PAGE_MASK;
99773bd4
PB
839 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
840 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
841 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
842 address >= tb->pc + tb->size)) {
0bf9e31a
BS
843 printf("ERROR invalidate: address=" TARGET_FMT_lx
844 " PC=%08lx size=%04x\n",
99773bd4 845 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
846 }
847 }
848 }
849}
850
851/* verify that all the pages have correct rights for code */
852static void tb_page_check(void)
853{
854 TranslationBlock *tb;
855 int i, flags1, flags2;
3b46e624 856
99773bd4
PB
857 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
858 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
859 flags1 = page_get_flags(tb->pc);
860 flags2 = page_get_flags(tb->pc + tb->size - 1);
861 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
862 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 863 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
864 }
865 }
866 }
867}
868
869#endif
870
871/* invalidate one TB */
872static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
873 int next_offset)
874{
875 TranslationBlock *tb1;
876 for(;;) {
877 tb1 = *ptb;
878 if (tb1 == tb) {
879 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
880 break;
881 }
882 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
883 }
884}
885
9fa3e853
FB
886static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
887{
888 TranslationBlock *tb1;
889 unsigned int n1;
890
891 for(;;) {
892 tb1 = *ptb;
893 n1 = (long)tb1 & 3;
894 tb1 = (TranslationBlock *)((long)tb1 & ~3);
895 if (tb1 == tb) {
896 *ptb = tb1->page_next[n1];
897 break;
898 }
899 ptb = &tb1->page_next[n1];
900 }
901}
902
d4e8164f
FB
903static inline void tb_jmp_remove(TranslationBlock *tb, int n)
904{
905 TranslationBlock *tb1, **ptb;
906 unsigned int n1;
907
908 ptb = &tb->jmp_next[n];
909 tb1 = *ptb;
910 if (tb1) {
911 /* find tb(n) in circular list */
912 for(;;) {
913 tb1 = *ptb;
914 n1 = (long)tb1 & 3;
915 tb1 = (TranslationBlock *)((long)tb1 & ~3);
916 if (n1 == n && tb1 == tb)
917 break;
918 if (n1 == 2) {
919 ptb = &tb1->jmp_first;
920 } else {
921 ptb = &tb1->jmp_next[n1];
922 }
923 }
924 /* now we can suppress tb(n) from the list */
925 *ptb = tb->jmp_next[n];
926
927 tb->jmp_next[n] = NULL;
928 }
929}
930
931/* reset the jump entry 'n' of a TB so that it is not chained to
932 another TB */
933static inline void tb_reset_jump(TranslationBlock *tb, int n)
934{
935 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
936}
937
41c1b1c9 938void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 939{
6a00d601 940 CPUState *env;
8a40a180 941 PageDesc *p;
d4e8164f 942 unsigned int h, n1;
41c1b1c9 943 tb_page_addr_t phys_pc;
8a40a180 944 TranslationBlock *tb1, *tb2;
3b46e624 945
8a40a180
FB
946 /* remove the TB from the hash list */
947 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
948 h = tb_phys_hash_func(phys_pc);
5fafdf24 949 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
950 offsetof(TranslationBlock, phys_hash_next));
951
952 /* remove the TB from the page list */
953 if (tb->page_addr[0] != page_addr) {
954 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
955 tb_page_remove(&p->first_tb, tb);
956 invalidate_page_bitmap(p);
957 }
958 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
959 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
960 tb_page_remove(&p->first_tb, tb);
961 invalidate_page_bitmap(p);
962 }
963
36bdbe54 964 tb_invalidated_flag = 1;
59817ccb 965
fd6ce8f6 966 /* remove the TB from the hash list */
8a40a180 967 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
968 for(env = first_cpu; env != NULL; env = env->next_cpu) {
969 if (env->tb_jmp_cache[h] == tb)
970 env->tb_jmp_cache[h] = NULL;
971 }
d4e8164f
FB
972
973 /* suppress this TB from the two jump lists */
974 tb_jmp_remove(tb, 0);
975 tb_jmp_remove(tb, 1);
976
977 /* suppress any remaining jumps to this TB */
978 tb1 = tb->jmp_first;
979 for(;;) {
980 n1 = (long)tb1 & 3;
981 if (n1 == 2)
982 break;
983 tb1 = (TranslationBlock *)((long)tb1 & ~3);
984 tb2 = tb1->jmp_next[n1];
985 tb_reset_jump(tb1, n1);
986 tb1->jmp_next[n1] = NULL;
987 tb1 = tb2;
988 }
989 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 990
e3db7226 991 tb_phys_invalidate_count++;
9fa3e853
FB
992}
993
994static inline void set_bits(uint8_t *tab, int start, int len)
995{
996 int end, mask, end1;
997
998 end = start + len;
999 tab += start >> 3;
1000 mask = 0xff << (start & 7);
1001 if ((start & ~7) == (end & ~7)) {
1002 if (start < end) {
1003 mask &= ~(0xff << (end & 7));
1004 *tab |= mask;
1005 }
1006 } else {
1007 *tab++ |= mask;
1008 start = (start + 8) & ~7;
1009 end1 = end & ~7;
1010 while (start < end1) {
1011 *tab++ = 0xff;
1012 start += 8;
1013 }
1014 if (start < end) {
1015 mask = ~(0xff << (end & 7));
1016 *tab |= mask;
1017 }
1018 }
1019}
1020
1021static void build_page_bitmap(PageDesc *p)
1022{
1023 int n, tb_start, tb_end;
1024 TranslationBlock *tb;
3b46e624 1025
7267c094 1026 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
1027
1028 tb = p->first_tb;
1029 while (tb != NULL) {
1030 n = (long)tb & 3;
1031 tb = (TranslationBlock *)((long)tb & ~3);
1032 /* NOTE: this is subtle as a TB may span two physical pages */
1033 if (n == 0) {
1034 /* NOTE: tb_end may be after the end of the page, but
1035 it is not a problem */
1036 tb_start = tb->pc & ~TARGET_PAGE_MASK;
1037 tb_end = tb_start + tb->size;
1038 if (tb_end > TARGET_PAGE_SIZE)
1039 tb_end = TARGET_PAGE_SIZE;
1040 } else {
1041 tb_start = 0;
1042 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1043 }
1044 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
1045 tb = tb->page_next[n];
1046 }
1047}
1048
2e70f6ef
PB
1049TranslationBlock *tb_gen_code(CPUState *env,
1050 target_ulong pc, target_ulong cs_base,
1051 int flags, int cflags)
d720b93d
FB
1052{
1053 TranslationBlock *tb;
1054 uint8_t *tc_ptr;
41c1b1c9
PB
1055 tb_page_addr_t phys_pc, phys_page2;
1056 target_ulong virt_page2;
d720b93d
FB
1057 int code_gen_size;
1058
41c1b1c9 1059 phys_pc = get_page_addr_code(env, pc);
c27004ec 1060 tb = tb_alloc(pc);
d720b93d
FB
1061 if (!tb) {
1062 /* flush must be done */
1063 tb_flush(env);
1064 /* cannot fail at this point */
c27004ec 1065 tb = tb_alloc(pc);
2e70f6ef
PB
1066 /* Don't forget to invalidate previous TB info. */
1067 tb_invalidated_flag = 1;
d720b93d
FB
1068 }
1069 tc_ptr = code_gen_ptr;
1070 tb->tc_ptr = tc_ptr;
1071 tb->cs_base = cs_base;
1072 tb->flags = flags;
1073 tb->cflags = cflags;
d07bde88 1074 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1075 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1076
d720b93d 1077 /* check next page if needed */
c27004ec 1078 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1079 phys_page2 = -1;
c27004ec 1080 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1081 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1082 }
41c1b1c9 1083 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1084 return tb;
d720b93d 1085}
3b46e624 1086
9fa3e853
FB
1087/* invalidate all TBs which intersect with the target physical page
1088 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1089 the same physical page. 'is_cpu_write_access' should be true if called
1090 from a real cpu write access: the virtual CPU will exit the current
1091 TB if code is modified inside this TB. */
41c1b1c9 1092void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1093 int is_cpu_write_access)
1094{
6b917547 1095 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1096 CPUState *env = cpu_single_env;
41c1b1c9 1097 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1098 PageDesc *p;
1099 int n;
1100#ifdef TARGET_HAS_PRECISE_SMC
1101 int current_tb_not_found = is_cpu_write_access;
1102 TranslationBlock *current_tb = NULL;
1103 int current_tb_modified = 0;
1104 target_ulong current_pc = 0;
1105 target_ulong current_cs_base = 0;
1106 int current_flags = 0;
1107#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1108
1109 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1110 if (!p)
9fa3e853 1111 return;
5fafdf24 1112 if (!p->code_bitmap &&
d720b93d
FB
1113 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1114 is_cpu_write_access) {
9fa3e853
FB
1115 /* build code bitmap */
1116 build_page_bitmap(p);
1117 }
1118
1119 /* we remove all the TBs in the range [start, end[ */
1120 /* XXX: see if in some cases it could be faster to invalidate all the code */
1121 tb = p->first_tb;
1122 while (tb != NULL) {
1123 n = (long)tb & 3;
1124 tb = (TranslationBlock *)((long)tb & ~3);
1125 tb_next = tb->page_next[n];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1127 if (n == 0) {
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1131 tb_end = tb_start + tb->size;
1132 } else {
1133 tb_start = tb->page_addr[1];
1134 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1135 }
1136 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1137#ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found) {
1139 current_tb_not_found = 0;
1140 current_tb = NULL;
2e70f6ef 1141 if (env->mem_io_pc) {
d720b93d 1142 /* now we have a real cpu fault */
2e70f6ef 1143 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1144 }
1145 }
1146 if (current_tb == tb &&
2e70f6ef 1147 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
3b46e624 1153
d720b93d 1154 current_tb_modified = 1;
618ba8e6 1155 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1156 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1157 &current_flags);
d720b93d
FB
1158 }
1159#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1160 /* we need to do that to handle the case where a signal
1161 occurs while doing tb_phys_invalidate() */
1162 saved_tb = NULL;
1163 if (env) {
1164 saved_tb = env->current_tb;
1165 env->current_tb = NULL;
1166 }
9fa3e853 1167 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1168 if (env) {
1169 env->current_tb = saved_tb;
1170 if (env->interrupt_request && env->current_tb)
1171 cpu_interrupt(env, env->interrupt_request);
1172 }
9fa3e853
FB
1173 }
1174 tb = tb_next;
1175 }
1176#if !defined(CONFIG_USER_ONLY)
1177 /* if no code remaining, no need to continue to use slow writes */
1178 if (!p->first_tb) {
1179 invalidate_page_bitmap(p);
d720b93d 1180 if (is_cpu_write_access) {
2e70f6ef 1181 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1182 }
1183 }
1184#endif
1185#ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb_modified) {
1187 /* we generate a block containing just the instruction
1188 modifying the memory. It will ensure that it cannot modify
1189 itself */
ea1c1802 1190 env->current_tb = NULL;
2e70f6ef 1191 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1192 cpu_resume_from_signal(env, NULL);
9fa3e853 1193 }
fd6ce8f6 1194#endif
9fa3e853 1195}
fd6ce8f6 1196
9fa3e853 1197/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1198static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1199{
1200 PageDesc *p;
1201 int offset, b;
59817ccb 1202#if 0
a4193c8a 1203 if (1) {
93fcfe39
AL
1204 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1205 cpu_single_env->mem_io_vaddr, len,
1206 cpu_single_env->eip,
1207 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1208 }
1209#endif
9fa3e853 1210 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1211 if (!p)
9fa3e853
FB
1212 return;
1213 if (p->code_bitmap) {
1214 offset = start & ~TARGET_PAGE_MASK;
1215 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1216 if (b & ((1 << len) - 1))
1217 goto do_invalidate;
1218 } else {
1219 do_invalidate:
d720b93d 1220 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1221 }
1222}
1223
9fa3e853 1224#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1225static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1226 unsigned long pc, void *puc)
9fa3e853 1227{
6b917547 1228 TranslationBlock *tb;
9fa3e853 1229 PageDesc *p;
6b917547 1230 int n;
d720b93d 1231#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1232 TranslationBlock *current_tb = NULL;
d720b93d 1233 CPUState *env = cpu_single_env;
6b917547
AL
1234 int current_tb_modified = 0;
1235 target_ulong current_pc = 0;
1236 target_ulong current_cs_base = 0;
1237 int current_flags = 0;
d720b93d 1238#endif
9fa3e853
FB
1239
1240 addr &= TARGET_PAGE_MASK;
1241 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1242 if (!p)
9fa3e853
FB
1243 return;
1244 tb = p->first_tb;
d720b93d
FB
1245#ifdef TARGET_HAS_PRECISE_SMC
1246 if (tb && pc != 0) {
1247 current_tb = tb_find_pc(pc);
1248 }
1249#endif
9fa3e853
FB
1250 while (tb != NULL) {
1251 n = (long)tb & 3;
1252 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1253#ifdef TARGET_HAS_PRECISE_SMC
1254 if (current_tb == tb &&
2e70f6ef 1255 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1256 /* If we are modifying the current TB, we must stop
1257 its execution. We could be more precise by checking
1258 that the modification is after the current PC, but it
1259 would require a specialized function to partially
1260 restore the CPU state */
3b46e624 1261
d720b93d 1262 current_tb_modified = 1;
618ba8e6 1263 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1264 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1265 &current_flags);
d720b93d
FB
1266 }
1267#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1268 tb_phys_invalidate(tb, addr);
1269 tb = tb->page_next[n];
1270 }
fd6ce8f6 1271 p->first_tb = NULL;
d720b93d
FB
1272#ifdef TARGET_HAS_PRECISE_SMC
1273 if (current_tb_modified) {
1274 /* we generate a block containing just the instruction
1275 modifying the memory. It will ensure that it cannot modify
1276 itself */
ea1c1802 1277 env->current_tb = NULL;
2e70f6ef 1278 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1279 cpu_resume_from_signal(env, puc);
1280 }
1281#endif
fd6ce8f6 1282}
9fa3e853 1283#endif
fd6ce8f6
FB
1284
1285/* add the tb in the target page and protect it if necessary */
5fafdf24 1286static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1287 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1288{
1289 PageDesc *p;
4429ab44
JQ
1290#ifndef CONFIG_USER_ONLY
1291 bool page_already_protected;
1292#endif
9fa3e853
FB
1293
1294 tb->page_addr[n] = page_addr;
5cd2c5b6 1295 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1296 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1297#ifndef CONFIG_USER_ONLY
1298 page_already_protected = p->first_tb != NULL;
1299#endif
9fa3e853
FB
1300 p->first_tb = (TranslationBlock *)((long)tb | n);
1301 invalidate_page_bitmap(p);
fd6ce8f6 1302
107db443 1303#if defined(TARGET_HAS_SMC) || 1
d720b93d 1304
9fa3e853 1305#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1306 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1307 target_ulong addr;
1308 PageDesc *p2;
9fa3e853
FB
1309 int prot;
1310
fd6ce8f6
FB
1311 /* force the host page as non writable (writes will have a
1312 page fault + mprotect overhead) */
53a5960a 1313 page_addr &= qemu_host_page_mask;
fd6ce8f6 1314 prot = 0;
53a5960a
PB
1315 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1316 addr += TARGET_PAGE_SIZE) {
1317
1318 p2 = page_find (addr >> TARGET_PAGE_BITS);
1319 if (!p2)
1320 continue;
1321 prot |= p2->flags;
1322 p2->flags &= ~PAGE_WRITE;
53a5960a 1323 }
5fafdf24 1324 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1325 (prot & PAGE_BITS) & ~PAGE_WRITE);
1326#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1327 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1328 page_addr);
fd6ce8f6 1329#endif
fd6ce8f6 1330 }
9fa3e853
FB
1331#else
1332 /* if some code is already present, then the pages are already
1333 protected. So we handle the case where only the first TB is
1334 allocated in a physical page */
4429ab44 1335 if (!page_already_protected) {
6a00d601 1336 tlb_protect_code(page_addr);
9fa3e853
FB
1337 }
1338#endif
d720b93d
FB
1339
1340#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1341}
1342
9fa3e853
FB
1343/* add a new TB and link it to the physical page tables. phys_page2 is
1344 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1345void tb_link_page(TranslationBlock *tb,
1346 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1347{
9fa3e853
FB
1348 unsigned int h;
1349 TranslationBlock **ptb;
1350
c8a706fe
PB
1351 /* Grab the mmap lock to stop another thread invalidating this TB
1352 before we are done. */
1353 mmap_lock();
9fa3e853
FB
1354 /* add in the physical hash table */
1355 h = tb_phys_hash_func(phys_pc);
1356 ptb = &tb_phys_hash[h];
1357 tb->phys_hash_next = *ptb;
1358 *ptb = tb;
fd6ce8f6
FB
1359
1360 /* add in the page list */
9fa3e853
FB
1361 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1362 if (phys_page2 != -1)
1363 tb_alloc_page(tb, 1, phys_page2);
1364 else
1365 tb->page_addr[1] = -1;
9fa3e853 1366
d4e8164f
FB
1367 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1368 tb->jmp_next[0] = NULL;
1369 tb->jmp_next[1] = NULL;
1370
1371 /* init original jump addresses */
1372 if (tb->tb_next_offset[0] != 0xffff)
1373 tb_reset_jump(tb, 0);
1374 if (tb->tb_next_offset[1] != 0xffff)
1375 tb_reset_jump(tb, 1);
8a40a180
FB
1376
1377#ifdef DEBUG_TB_CHECK
1378 tb_page_check();
1379#endif
c8a706fe 1380 mmap_unlock();
fd6ce8f6
FB
1381}
1382
9fa3e853
FB
1383/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1384 tb[1].tc_ptr. Return NULL if not found */
1385TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1386{
9fa3e853
FB
1387 int m_min, m_max, m;
1388 unsigned long v;
1389 TranslationBlock *tb;
a513fe19
FB
1390
1391 if (nb_tbs <= 0)
1392 return NULL;
1393 if (tc_ptr < (unsigned long)code_gen_buffer ||
1394 tc_ptr >= (unsigned long)code_gen_ptr)
1395 return NULL;
1396 /* binary search (cf Knuth) */
1397 m_min = 0;
1398 m_max = nb_tbs - 1;
1399 while (m_min <= m_max) {
1400 m = (m_min + m_max) >> 1;
1401 tb = &tbs[m];
1402 v = (unsigned long)tb->tc_ptr;
1403 if (v == tc_ptr)
1404 return tb;
1405 else if (tc_ptr < v) {
1406 m_max = m - 1;
1407 } else {
1408 m_min = m + 1;
1409 }
5fafdf24 1410 }
a513fe19
FB
1411 return &tbs[m_max];
1412}
7501267e 1413
ea041c0e
FB
1414static void tb_reset_jump_recursive(TranslationBlock *tb);
1415
1416static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1417{
1418 TranslationBlock *tb1, *tb_next, **ptb;
1419 unsigned int n1;
1420
1421 tb1 = tb->jmp_next[n];
1422 if (tb1 != NULL) {
1423 /* find head of list */
1424 for(;;) {
1425 n1 = (long)tb1 & 3;
1426 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1427 if (n1 == 2)
1428 break;
1429 tb1 = tb1->jmp_next[n1];
1430 }
1431 /* we are now sure now that tb jumps to tb1 */
1432 tb_next = tb1;
1433
1434 /* remove tb from the jmp_first list */
1435 ptb = &tb_next->jmp_first;
1436 for(;;) {
1437 tb1 = *ptb;
1438 n1 = (long)tb1 & 3;
1439 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1440 if (n1 == n && tb1 == tb)
1441 break;
1442 ptb = &tb1->jmp_next[n1];
1443 }
1444 *ptb = tb->jmp_next[n];
1445 tb->jmp_next[n] = NULL;
3b46e624 1446
ea041c0e
FB
1447 /* suppress the jump to next tb in generated code */
1448 tb_reset_jump(tb, n);
1449
0124311e 1450 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1451 tb_reset_jump_recursive(tb_next);
1452 }
1453}
1454
1455static void tb_reset_jump_recursive(TranslationBlock *tb)
1456{
1457 tb_reset_jump_recursive2(tb, 0);
1458 tb_reset_jump_recursive2(tb, 1);
1459}
1460
1fddef4b 1461#if defined(TARGET_HAS_ICE)
94df27fd
PB
1462#if defined(CONFIG_USER_ONLY)
1463static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1464{
1465 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1466}
1467#else
d720b93d
FB
1468static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1469{
c227f099 1470 target_phys_addr_t addr;
c227f099 1471 ram_addr_t ram_addr;
f3705d53 1472 MemoryRegionSection *section;
d720b93d 1473
c2f07f81 1474 addr = cpu_get_phys_page_debug(env, pc);
06ef3525 1475 section = phys_page_find(addr >> TARGET_PAGE_BITS);
f3705d53
AK
1476 if (!(memory_region_is_ram(section->mr)
1477 || (section->mr->rom_device && section->mr->readable))) {
06ef3525
AK
1478 return;
1479 }
f3705d53
AK
1480 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
1481 + section_addr(section, addr);
706cd4b5 1482 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1483}
c27004ec 1484#endif
94df27fd 1485#endif /* TARGET_HAS_ICE */
d720b93d 1486
c527ee8f
PB
1487#if defined(CONFIG_USER_ONLY)
1488void cpu_watchpoint_remove_all(CPUState *env, int mask)
1489
1490{
1491}
1492
1493int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1494 int flags, CPUWatchpoint **watchpoint)
1495{
1496 return -ENOSYS;
1497}
1498#else
6658ffb8 1499/* Add a watchpoint. */
a1d1bb31
AL
1500int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1501 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1502{
b4051334 1503 target_ulong len_mask = ~(len - 1);
c0ce998e 1504 CPUWatchpoint *wp;
6658ffb8 1505
b4051334 1506 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
0dc23828
MF
1507 if ((len & (len - 1)) || (addr & ~len_mask) ||
1508 len == 0 || len > TARGET_PAGE_SIZE) {
b4051334
AL
1509 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1510 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1511 return -EINVAL;
1512 }
7267c094 1513 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1514
1515 wp->vaddr = addr;
b4051334 1516 wp->len_mask = len_mask;
a1d1bb31
AL
1517 wp->flags = flags;
1518
2dc9f411 1519 /* keep all GDB-injected watchpoints in front */
c0ce998e 1520 if (flags & BP_GDB)
72cf2d4f 1521 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1522 else
72cf2d4f 1523 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1524
6658ffb8 1525 tlb_flush_page(env, addr);
a1d1bb31
AL
1526
1527 if (watchpoint)
1528 *watchpoint = wp;
1529 return 0;
6658ffb8
PB
1530}
1531
a1d1bb31
AL
1532/* Remove a specific watchpoint. */
1533int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1534 int flags)
6658ffb8 1535{
b4051334 1536 target_ulong len_mask = ~(len - 1);
a1d1bb31 1537 CPUWatchpoint *wp;
6658ffb8 1538
72cf2d4f 1539 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1540 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1541 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1542 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1543 return 0;
1544 }
1545 }
a1d1bb31 1546 return -ENOENT;
6658ffb8
PB
1547}
1548
a1d1bb31
AL
1549/* Remove a specific watchpoint by reference. */
1550void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1551{
72cf2d4f 1552 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1553
a1d1bb31
AL
1554 tlb_flush_page(env, watchpoint->vaddr);
1555
7267c094 1556 g_free(watchpoint);
a1d1bb31
AL
1557}
1558
1559/* Remove all matching watchpoints. */
1560void cpu_watchpoint_remove_all(CPUState *env, int mask)
1561{
c0ce998e 1562 CPUWatchpoint *wp, *next;
a1d1bb31 1563
72cf2d4f 1564 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1565 if (wp->flags & mask)
1566 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1567 }
7d03f82f 1568}
c527ee8f 1569#endif
7d03f82f 1570
a1d1bb31
AL
1571/* Add a breakpoint. */
1572int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1573 CPUBreakpoint **breakpoint)
4c3a88a2 1574{
1fddef4b 1575#if defined(TARGET_HAS_ICE)
c0ce998e 1576 CPUBreakpoint *bp;
3b46e624 1577
7267c094 1578 bp = g_malloc(sizeof(*bp));
4c3a88a2 1579
a1d1bb31
AL
1580 bp->pc = pc;
1581 bp->flags = flags;
1582
2dc9f411 1583 /* keep all GDB-injected breakpoints in front */
c0ce998e 1584 if (flags & BP_GDB)
72cf2d4f 1585 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1586 else
72cf2d4f 1587 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1588
d720b93d 1589 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1590
1591 if (breakpoint)
1592 *breakpoint = bp;
4c3a88a2
FB
1593 return 0;
1594#else
a1d1bb31 1595 return -ENOSYS;
4c3a88a2
FB
1596#endif
1597}
1598
a1d1bb31
AL
1599/* Remove a specific breakpoint. */
1600int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1601{
7d03f82f 1602#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1603 CPUBreakpoint *bp;
1604
72cf2d4f 1605 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1606 if (bp->pc == pc && bp->flags == flags) {
1607 cpu_breakpoint_remove_by_ref(env, bp);
1608 return 0;
1609 }
7d03f82f 1610 }
a1d1bb31
AL
1611 return -ENOENT;
1612#else
1613 return -ENOSYS;
7d03f82f
EI
1614#endif
1615}
1616
a1d1bb31
AL
1617/* Remove a specific breakpoint by reference. */
1618void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1619{
1fddef4b 1620#if defined(TARGET_HAS_ICE)
72cf2d4f 1621 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1622
a1d1bb31
AL
1623 breakpoint_invalidate(env, breakpoint->pc);
1624
7267c094 1625 g_free(breakpoint);
a1d1bb31
AL
1626#endif
1627}
1628
1629/* Remove all matching breakpoints. */
1630void cpu_breakpoint_remove_all(CPUState *env, int mask)
1631{
1632#if defined(TARGET_HAS_ICE)
c0ce998e 1633 CPUBreakpoint *bp, *next;
a1d1bb31 1634
72cf2d4f 1635 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1636 if (bp->flags & mask)
1637 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1638 }
4c3a88a2
FB
1639#endif
1640}
1641
c33a346e
FB
1642/* enable or disable single step mode. EXCP_DEBUG is returned by the
1643 CPU loop after each instruction */
1644void cpu_single_step(CPUState *env, int enabled)
1645{
1fddef4b 1646#if defined(TARGET_HAS_ICE)
c33a346e
FB
1647 if (env->singlestep_enabled != enabled) {
1648 env->singlestep_enabled = enabled;
e22a25c9
AL
1649 if (kvm_enabled())
1650 kvm_update_guest_debug(env, 0);
1651 else {
ccbb4d44 1652 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1653 /* XXX: only flush what is necessary */
1654 tb_flush(env);
1655 }
c33a346e
FB
1656 }
1657#endif
1658}
1659
34865134
FB
1660/* enable or disable low levels log */
1661void cpu_set_log(int log_flags)
1662{
1663 loglevel = log_flags;
1664 if (loglevel && !logfile) {
11fcfab4 1665 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1666 if (!logfile) {
1667 perror(logfilename);
1668 _exit(1);
1669 }
9fa3e853
FB
1670#if !defined(CONFIG_SOFTMMU)
1671 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1672 {
b55266b5 1673 static char logfile_buf[4096];
9fa3e853
FB
1674 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1675 }
daf767b1
SW
1676#elif defined(_WIN32)
1677 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1678 setvbuf(logfile, NULL, _IONBF, 0);
1679#else
34865134 1680 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1681#endif
e735b91c
PB
1682 log_append = 1;
1683 }
1684 if (!loglevel && logfile) {
1685 fclose(logfile);
1686 logfile = NULL;
34865134
FB
1687 }
1688}
1689
1690void cpu_set_log_filename(const char *filename)
1691{
1692 logfilename = strdup(filename);
e735b91c
PB
1693 if (logfile) {
1694 fclose(logfile);
1695 logfile = NULL;
1696 }
1697 cpu_set_log(loglevel);
34865134 1698}
c33a346e 1699
3098dba0 1700static void cpu_unlink_tb(CPUState *env)
ea041c0e 1701{
3098dba0
AJ
1702 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1703 problem and hope the cpu will stop of its own accord. For userspace
1704 emulation this often isn't actually as bad as it sounds. Often
1705 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1706 TranslationBlock *tb;
c227f099 1707 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1708
cab1b4bd 1709 spin_lock(&interrupt_lock);
3098dba0
AJ
1710 tb = env->current_tb;
1711 /* if the cpu is currently executing code, we must unlink it and
1712 all the potentially executing TB */
f76cfe56 1713 if (tb) {
3098dba0
AJ
1714 env->current_tb = NULL;
1715 tb_reset_jump_recursive(tb);
be214e6c 1716 }
cab1b4bd 1717 spin_unlock(&interrupt_lock);
3098dba0
AJ
1718}
1719
97ffbd8d 1720#ifndef CONFIG_USER_ONLY
3098dba0 1721/* mask must never be zero, except for A20 change call */
ec6959d0 1722static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1723{
1724 int old_mask;
be214e6c 1725
2e70f6ef 1726 old_mask = env->interrupt_request;
68a79315 1727 env->interrupt_request |= mask;
3098dba0 1728
8edac960
AL
1729 /*
1730 * If called from iothread context, wake the target cpu in
1731 * case its halted.
1732 */
b7680cb6 1733 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1734 qemu_cpu_kick(env);
1735 return;
1736 }
8edac960 1737
2e70f6ef 1738 if (use_icount) {
266910c4 1739 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1740 if (!can_do_io(env)
be214e6c 1741 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1742 cpu_abort(env, "Raised interrupt while not in I/O function");
1743 }
2e70f6ef 1744 } else {
3098dba0 1745 cpu_unlink_tb(env);
ea041c0e
FB
1746 }
1747}
1748
ec6959d0
JK
1749CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1750
97ffbd8d
JK
1751#else /* CONFIG_USER_ONLY */
1752
1753void cpu_interrupt(CPUState *env, int mask)
1754{
1755 env->interrupt_request |= mask;
1756 cpu_unlink_tb(env);
1757}
1758#endif /* CONFIG_USER_ONLY */
1759
b54ad049
FB
1760void cpu_reset_interrupt(CPUState *env, int mask)
1761{
1762 env->interrupt_request &= ~mask;
1763}
1764
3098dba0
AJ
1765void cpu_exit(CPUState *env)
1766{
1767 env->exit_request = 1;
1768 cpu_unlink_tb(env);
1769}
1770
c7cd6a37 1771const CPULogItem cpu_log_items[] = {
5fafdf24 1772 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1773 "show generated host assembly code for each compiled TB" },
1774 { CPU_LOG_TB_IN_ASM, "in_asm",
1775 "show target assembly code for each compiled TB" },
5fafdf24 1776 { CPU_LOG_TB_OP, "op",
57fec1fe 1777 "show micro ops for each compiled TB" },
f193c797 1778 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1779 "show micro ops "
1780#ifdef TARGET_I386
1781 "before eflags optimization and "
f193c797 1782#endif
e01a1157 1783 "after liveness analysis" },
f193c797
FB
1784 { CPU_LOG_INT, "int",
1785 "show interrupts/exceptions in short format" },
1786 { CPU_LOG_EXEC, "exec",
1787 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1788 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1789 "show CPU state before block translation" },
f193c797
FB
1790#ifdef TARGET_I386
1791 { CPU_LOG_PCALL, "pcall",
1792 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1793 { CPU_LOG_RESET, "cpu_reset",
1794 "show CPU state before CPU resets" },
f193c797 1795#endif
8e3a9fd2 1796#ifdef DEBUG_IOPORT
fd872598
FB
1797 { CPU_LOG_IOPORT, "ioport",
1798 "show all i/o ports accesses" },
8e3a9fd2 1799#endif
f193c797
FB
1800 { 0, NULL, NULL },
1801};
1802
1803static int cmp1(const char *s1, int n, const char *s2)
1804{
1805 if (strlen(s2) != n)
1806 return 0;
1807 return memcmp(s1, s2, n) == 0;
1808}
3b46e624 1809
f193c797
FB
1810/* takes a comma-separated list of log masks. Returns 0 on error. */
1811int cpu_str_to_log_mask(const char *str)
1812{
c7cd6a37 1813 const CPULogItem *item;
f193c797
FB
1814 int mask;
1815 const char *p, *p1;
1816
1817 p = str;
1818 mask = 0;
1819 for(;;) {
1820 p1 = strchr(p, ',');
1821 if (!p1)
1822 p1 = p + strlen(p);
9742bf26
YT
1823 if(cmp1(p,p1-p,"all")) {
1824 for(item = cpu_log_items; item->mask != 0; item++) {
1825 mask |= item->mask;
1826 }
1827 } else {
1828 for(item = cpu_log_items; item->mask != 0; item++) {
1829 if (cmp1(p, p1 - p, item->name))
1830 goto found;
1831 }
1832 return 0;
f193c797 1833 }
f193c797
FB
1834 found:
1835 mask |= item->mask;
1836 if (*p1 != ',')
1837 break;
1838 p = p1 + 1;
1839 }
1840 return mask;
1841}
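/* Illustrative usage (hypothetical caller, not part of this file): a
 * "-d"-style option string can be parsed and applied with the two
 * helpers above:
 *
 *     int mask = cpu_str_to_log_mask("in_asm,int");
 *     if (mask == 0) {
 *         fprintf(stderr, "unknown log item\n");
 *     } else {
 *         cpu_set_log(mask);
 *     }
 */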
ea041c0e 1842
7501267e
FB
1843void cpu_abort(CPUState *env, const char *fmt, ...)
1844{
1845 va_list ap;
493ae1f0 1846 va_list ap2;
7501267e
FB
1847
1848 va_start(ap, fmt);
493ae1f0 1849 va_copy(ap2, ap);
7501267e
FB
1850 fprintf(stderr, "qemu: fatal: ");
1851 vfprintf(stderr, fmt, ap);
1852 fprintf(stderr, "\n");
1853#ifdef TARGET_I386
7fe48483
FB
1854 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1855#else
1856 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1857#endif
93fcfe39
AL
1858 if (qemu_log_enabled()) {
1859 qemu_log("qemu: fatal: ");
1860 qemu_log_vprintf(fmt, ap2);
1861 qemu_log("\n");
f9373291 1862#ifdef TARGET_I386
93fcfe39 1863 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1864#else
93fcfe39 1865 log_cpu_state(env, 0);
f9373291 1866#endif
31b1a7b4 1867 qemu_log_flush();
93fcfe39 1868 qemu_log_close();
924edcae 1869 }
493ae1f0 1870 va_end(ap2);
f9373291 1871 va_end(ap);
fd052bf6
RV
1872#if defined(CONFIG_USER_ONLY)
1873 {
1874 struct sigaction act;
1875 sigfillset(&act.sa_mask);
1876 act.sa_handler = SIG_DFL;
1877 sigaction(SIGABRT, &act, NULL);
1878 }
1879#endif
7501267e
FB
1880 abort();
1881}
1882
c5be9f08
TS
1883CPUState *cpu_copy(CPUState *env)
1884{
01ba9816 1885 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1886 CPUState *next_cpu = new_env->next_cpu;
1887 int cpu_index = new_env->cpu_index;
5a38f081
AL
1888#if defined(TARGET_HAS_ICE)
1889 CPUBreakpoint *bp;
1890 CPUWatchpoint *wp;
1891#endif
1892
c5be9f08 1893 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1894
1895 /* Preserve chaining and index. */
c5be9f08
TS
1896 new_env->next_cpu = next_cpu;
1897 new_env->cpu_index = cpu_index;
5a38f081
AL
1898
1899 /* Clone all break/watchpoints.
1900 Note: Once we support ptrace with hw-debug register access, make sure
1901 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1902 QTAILQ_INIT(&env->breakpoints);
1903 QTAILQ_INIT(&env->watchpoints);
5a38f081 1904#if defined(TARGET_HAS_ICE)
72cf2d4f 1905 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1906 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1907 }
72cf2d4f 1908 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1909 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1910 wp->flags, NULL);
1911 }
1912#endif
1913
c5be9f08
TS
1914 return new_env;
1915}
1916
0124311e
FB
1917#if !defined(CONFIG_USER_ONLY)
1918
5c751e99
EI
1919static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1920{
1921 unsigned int i;
1922
1923 /* Discard jump cache entries for any tb which might potentially
1924 overlap the flushed page. */
1925 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1926 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1927 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1928
1929 i = tb_jmp_cache_hash_page(addr);
1930 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1931 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1932}
1933
08738984
IK
1934static CPUTLBEntry s_cputlb_empty_entry = {
1935 .addr_read = -1,
1936 .addr_write = -1,
1937 .addr_code = -1,
1938 .addend = -1,
1939};
1940
771124e1
PM
1941/* NOTE:
1942 * If flush_global is true (the usual case), flush all tlb entries.
1943 * If flush_global is false, flush (at least) all tlb entries not
1944 * marked global.
1945 *
1946 * Since QEMU doesn't currently implement a global/not-global flag
1947 * for tlb entries, at the moment tlb_flush() will also flush all
1948 * tlb entries in the flush_global == false case. This is OK because
1949 * CPU architectures generally permit an implementation to drop
1950 * entries from the TLB at any time, so flushing more entries than
1951 * required is only an efficiency issue, not a correctness issue.
1952 */
ee8b7021 1953void tlb_flush(CPUState *env, int flush_global)
33417e70 1954{
33417e70 1955 int i;
0124311e 1956
9fa3e853
FB
1957#if defined(DEBUG_TLB)
1958 printf("tlb_flush:\n");
1959#endif
0124311e
FB
1960 /* must reset current TB so that interrupts cannot modify the
1961 links while we are modifying them */
1962 env->current_tb = NULL;
1963
33417e70 1964 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1965 int mmu_idx;
1966 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1967 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1968 }
33417e70 1969 }
9fa3e853 1970
8a40a180 1971 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1972
d4c430a8
PB
1973 env->tlb_flush_addr = -1;
1974 env->tlb_flush_mask = 0;
e3db7226 1975 tlb_flush_count++;
33417e70
FB
1976}
1977
274da6b2 1978static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1979{
5fafdf24 1980 if (addr == (tlb_entry->addr_read &
84b7b8e7 1981 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1982 addr == (tlb_entry->addr_write &
84b7b8e7 1983 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1984 addr == (tlb_entry->addr_code &
84b7b8e7 1985 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1986 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1987 }
61382a50
FB
1988}
1989
2e12669a 1990void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1991{
8a40a180 1992 int i;
cfde4bd9 1993 int mmu_idx;
0124311e 1994
9fa3e853 1995#if defined(DEBUG_TLB)
108c49b8 1996 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1997#endif
d4c430a8
PB
1998 /* Check if we need to flush due to large pages. */
1999 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2000#if defined(DEBUG_TLB)
2001 printf("tlb_flush_page: forced full flush ("
2002 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2003 env->tlb_flush_addr, env->tlb_flush_mask);
2004#endif
2005 tlb_flush(env, 1);
2006 return;
2007 }
0124311e
FB
2008 /* must reset current TB so that interrupts cannot modify the
2009 links while we are modifying them */
2010 env->current_tb = NULL;
61382a50
FB
2011
2012 addr &= TARGET_PAGE_MASK;
2013 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2014 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2015 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2016
5c751e99 2017 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2018}
2019
9fa3e853
FB
2020/* update the TLBs so that writes to code in the virtual page 'addr'
2021 can be detected */
c227f099 2022static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2023{
5fafdf24 2024 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2025 ram_addr + TARGET_PAGE_SIZE,
2026 CODE_DIRTY_FLAG);
9fa3e853
FB
2027}
2028
9fa3e853 2029/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2030 tested for self-modifying code */
c227f099 2031static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2032 target_ulong vaddr)
9fa3e853 2033{
f7c11b53 2034 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2035}
2036
5fafdf24 2037static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2038 unsigned long start, unsigned long length)
2039{
2040 unsigned long addr;
0e0df1e2 2041 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 2042 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2043 if ((addr - start) < length) {
0f459d16 2044 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2045 }
2046 }
2047}
2048
5579c7f3 2049/* Note: start and end must be within the same ram block. */
c227f099 2050void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2051 int dirty_flags)
1ccde1cb
FB
2052{
2053 CPUState *env;
4f2ac237 2054 unsigned long length, start1;
f7c11b53 2055 int i;
1ccde1cb
FB
2056
2057 start &= TARGET_PAGE_MASK;
2058 end = TARGET_PAGE_ALIGN(end);
2059
2060 length = end - start;
2061 if (length == 0)
2062 return;
f7c11b53 2063 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2064
1ccde1cb
FB
2065 /* we modify the TLB cache so that the dirty bit will be set again
2066 when accessing the range */
b2e0a138 2067 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2068 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2069 address comparisons below. */
b2e0a138 2070 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2071 != (end - 1) - start) {
2072 abort();
2073 }
2074
6a00d601 2075 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2076 int mmu_idx;
2077 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2078 for(i = 0; i < CPU_TLB_SIZE; i++)
2079 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2080 start1, length);
2081 }
6a00d601 2082 }
1ccde1cb
FB
2083}
2084
74576198
AL
2085int cpu_physical_memory_set_dirty_tracking(int enable)
2086{
f6f3fbca 2087 int ret = 0;
74576198 2088 in_migration = enable;
f6f3fbca 2089 return ret;
74576198
AL
2090}
2091
3a7d929e
FB
2092static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2093{
c227f099 2094 ram_addr_t ram_addr;
5579c7f3 2095 void *p;
3a7d929e 2096
0e0df1e2 2097 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2098 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2099 + tlb_entry->addend);
e890261f 2100 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2101 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2102 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2103 }
2104 }
2105}
2106
2107/* update the TLB according to the current state of the dirty bits */
2108void cpu_tlb_update_dirty(CPUState *env)
2109{
2110 int i;
cfde4bd9
IY
2111 int mmu_idx;
2112 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2113 for(i = 0; i < CPU_TLB_SIZE; i++)
2114 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2115 }
3a7d929e
FB
2116}
2117
0f459d16 2118static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2119{
0f459d16
PB
2120 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2121 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2122}
2123
0f459d16
PB
2124/* update the TLB corresponding to virtual page vaddr
2125 so that it is no longer dirty */
2126static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2127{
1ccde1cb 2128 int i;
cfde4bd9 2129 int mmu_idx;
1ccde1cb 2130
0f459d16 2131 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2132 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2133 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2134 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2135}
2136
d4c430a8
PB
2137/* Our TLB does not support large pages, so remember the area covered by
2138 large pages and trigger a full TLB flush if these are invalidated. */
2139static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2140 target_ulong size)
2141{
2142 target_ulong mask = ~(size - 1);
2143
2144 if (env->tlb_flush_addr == (target_ulong)-1) {
2145 env->tlb_flush_addr = vaddr & mask;
2146 env->tlb_flush_mask = mask;
2147 return;
2148 }
2149 /* Extend the existing region to include the new page.
2150 This is a compromise between unnecessary flushes and the cost
2151 of maintaining a full variable size TLB. */
2152 mask &= env->tlb_flush_mask;
2153 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2154 mask <<= 1;
2155 }
2156 env->tlb_flush_addr &= mask;
2157 env->tlb_flush_mask = mask;
2158}
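/* Worked example (illustrative, assuming a 32-bit target_ulong): adding
 * a 2 MB page at vaddr 0x00200000 sets tlb_flush_addr = 0x00200000 and
 * tlb_flush_mask = 0xffe00000.  Adding a second 2 MB page at 0x00600000
 * widens the mask until both addresses match, leaving
 * tlb_flush_addr = 0x00000000 and tlb_flush_mask = 0xff800000, so any
 * tlb_flush_page() inside that 8 MB window falls back to a full flush. */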
2159
06ef3525 2160static bool is_ram_rom(MemoryRegionSection *s)
1d393fa2 2161{
06ef3525 2162 return memory_region_is_ram(s->mr);
1d393fa2
AK
2163}
2164
06ef3525 2165static bool is_romd(MemoryRegionSection *s)
75c578dc 2166{
06ef3525 2167 MemoryRegion *mr = s->mr;
75c578dc 2168
75c578dc
AK
2169 return mr->rom_device && mr->readable;
2170}
2171
06ef3525 2172static bool is_ram_rom_romd(MemoryRegionSection *s)
1d393fa2 2173{
06ef3525 2174 return is_ram_rom(s) || is_romd(s);
1d393fa2
AK
2175}
2176
d4c430a8
PB
2177/* Add a new TLB entry. At most one entry for a given virtual address
2178 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2179 supplied size is only used by tlb_flush_page. */
2180void tlb_set_page(CPUState *env, target_ulong vaddr,
2181 target_phys_addr_t paddr, int prot,
2182 int mmu_idx, target_ulong size)
9fa3e853 2183{
f3705d53 2184 MemoryRegionSection *section;
9fa3e853 2185 unsigned int index;
4f2ac237 2186 target_ulong address;
0f459d16 2187 target_ulong code_address;
355b1943 2188 unsigned long addend;
84b7b8e7 2189 CPUTLBEntry *te;
a1d1bb31 2190 CPUWatchpoint *wp;
c227f099 2191 target_phys_addr_t iotlb;
9fa3e853 2192
d4c430a8
PB
2193 assert(size >= TARGET_PAGE_SIZE);
2194 if (size != TARGET_PAGE_SIZE) {
2195 tlb_add_large_page(env, vaddr, size);
2196 }
06ef3525 2197 section = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853 2198#if defined(DEBUG_TLB)
7fd3f494
SW
2199 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2200 " prot=%x idx=%d pd=0x%08lx\n",
2201 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2202#endif
2203
0f459d16 2204 address = vaddr;
f3705d53 2205 if (!is_ram_rom_romd(section)) {
0f459d16
PB
2206 /* IO memory case (romd handled later) */
2207 address |= TLB_MMIO;
2208 }
f3705d53
AK
2209 if (is_ram_rom_romd(section)) {
2210 addend = (unsigned long)memory_region_get_ram_ptr(section->mr)
2211 + section_addr(section, paddr);
06ef3525
AK
2212 } else {
2213 addend = 0;
2214 }
f3705d53 2215 if (is_ram_rom(section)) {
0f459d16 2216 /* Normal RAM. */
f3705d53
AK
2217 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
2218 + section_addr(section, paddr);
2219 if (!section->readonly)
aa102231 2220 iotlb |= phys_section_notdirty;
0f459d16 2221 else
aa102231 2222 iotlb |= phys_section_rom;
0f459d16 2223 } else {
ccbb4d44 2224 /* IO handlers are currently passed a physical address.
0f459d16
PB
2225 It would be nice to pass an offset from the base address
2226 of that region. This would avoid having to special case RAM,
2227 and avoid full address decoding in every device.
2228 We can't use the high bits of pd for this because
2229 IO_MEM_ROMD uses these as a ram address. */
aa102231 2230 iotlb = section - phys_sections;
f3705d53 2231 iotlb += section_addr(section, paddr);
0f459d16
PB
2232 }
2233
2234 code_address = address;
2235 /* Make accesses to pages with watchpoints go via the
2236 watchpoint trap routines. */
72cf2d4f 2237 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2238 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2239 /* Avoid trapping reads of pages with a write breakpoint. */
2240 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
aa102231 2241 iotlb = phys_section_watch + paddr;
bf298f83
JK
2242 address |= TLB_MMIO;
2243 break;
2244 }
6658ffb8 2245 }
0f459d16 2246 }
d79acba4 2247
0f459d16
PB
2248 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2249 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2250 te = &env->tlb_table[mmu_idx][index];
2251 te->addend = addend - vaddr;
2252 if (prot & PAGE_READ) {
2253 te->addr_read = address;
2254 } else {
2255 te->addr_read = -1;
2256 }
5c751e99 2257
0f459d16
PB
2258 if (prot & PAGE_EXEC) {
2259 te->addr_code = code_address;
2260 } else {
2261 te->addr_code = -1;
2262 }
2263 if (prot & PAGE_WRITE) {
f3705d53
AK
2264 if ((memory_region_is_ram(section->mr) && section->readonly)
2265 || is_romd(section)) {
0f459d16
PB
2266 /* Write access calls the I/O callback. */
2267 te->addr_write = address | TLB_MMIO;
f3705d53 2268 } else if (memory_region_is_ram(section->mr)
06ef3525 2269 && !cpu_physical_memory_is_dirty(
f3705d53
AK
2270 section->mr->ram_addr
2271 + section_addr(section, paddr))) {
0f459d16 2272 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2273 } else {
0f459d16 2274 te->addr_write = address;
9fa3e853 2275 }
0f459d16
PB
2276 } else {
2277 te->addr_write = -1;
9fa3e853 2278 }
9fa3e853
FB
2279}
2280
0124311e
FB
2281#else
2282
ee8b7021 2283void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2284{
2285}
2286
2e12669a 2287void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2288{
2289}
2290
edf8e2af
MW
2291/*
2292 * Walks guest process memory "regions" one by one
2293 * and calls callback function 'fn' for each region.
2294 */
5cd2c5b6
RH
2295
2296struct walk_memory_regions_data
2297{
2298 walk_memory_regions_fn fn;
2299 void *priv;
2300 unsigned long start;
2301 int prot;
2302};
2303
2304static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2305 abi_ulong end, int new_prot)
5cd2c5b6
RH
2306{
2307 if (data->start != -1ul) {
2308 int rc = data->fn(data->priv, data->start, end, data->prot);
2309 if (rc != 0) {
2310 return rc;
2311 }
2312 }
2313
2314 data->start = (new_prot ? end : -1ul);
2315 data->prot = new_prot;
2316
2317 return 0;
2318}
2319
2320static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2321 abi_ulong base, int level, void **lp)
5cd2c5b6 2322{
b480d9b7 2323 abi_ulong pa;
5cd2c5b6
RH
2324 int i, rc;
2325
2326 if (*lp == NULL) {
2327 return walk_memory_regions_end(data, base, 0);
2328 }
2329
2330 if (level == 0) {
2331 PageDesc *pd = *lp;
7296abac 2332 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2333 int prot = pd[i].flags;
2334
2335 pa = base | (i << TARGET_PAGE_BITS);
2336 if (prot != data->prot) {
2337 rc = walk_memory_regions_end(data, pa, prot);
2338 if (rc != 0) {
2339 return rc;
9fa3e853 2340 }
9fa3e853 2341 }
5cd2c5b6
RH
2342 }
2343 } else {
2344 void **pp = *lp;
7296abac 2345 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2346 pa = base | ((abi_ulong)i <<
2347 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2348 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2349 if (rc != 0) {
2350 return rc;
2351 }
2352 }
2353 }
2354
2355 return 0;
2356}
2357
2358int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2359{
2360 struct walk_memory_regions_data data;
2361 unsigned long i;
2362
2363 data.fn = fn;
2364 data.priv = priv;
2365 data.start = -1ul;
2366 data.prot = 0;
2367
2368 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2369 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2370 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2371 if (rc != 0) {
2372 return rc;
9fa3e853 2373 }
33417e70 2374 }
5cd2c5b6
RH
2375
2376 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2377}
2378
b480d9b7
PB
2379static int dump_region(void *priv, abi_ulong start,
2380 abi_ulong end, unsigned long prot)
edf8e2af
MW
2381{
2382 FILE *f = (FILE *)priv;
2383
b480d9b7
PB
2384 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2385 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2386 start, end, end - start,
2387 ((prot & PAGE_READ) ? 'r' : '-'),
2388 ((prot & PAGE_WRITE) ? 'w' : '-'),
2389 ((prot & PAGE_EXEC) ? 'x' : '-'));
2390
2391 return (0);
2392}
2393
2394/* dump memory mappings */
2395void page_dump(FILE *f)
2396{
2397 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2398 "start", "end", "size", "prot");
2399 walk_memory_regions(f, dump_region);
33417e70
FB
2400}
2401
53a5960a 2402int page_get_flags(target_ulong address)
33417e70 2403{
9fa3e853
FB
2404 PageDesc *p;
2405
2406 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2407 if (!p)
9fa3e853
FB
2408 return 0;
2409 return p->flags;
2410}
2411
376a7909
RH
2412/* Modify the flags of a page and invalidate the code if necessary.
2413 The flag PAGE_WRITE_ORG is positioned automatically depending
2414 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2415void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2416{
376a7909
RH
2417 target_ulong addr, len;
2418
2419 /* This function should never be called with addresses outside the
2420 guest address space. If this assert fires, it probably indicates
2421 a missing call to h2g_valid. */
b480d9b7
PB
2422#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2423 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2424#endif
2425 assert(start < end);
9fa3e853
FB
2426
2427 start = start & TARGET_PAGE_MASK;
2428 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2429
2430 if (flags & PAGE_WRITE) {
9fa3e853 2431 flags |= PAGE_WRITE_ORG;
376a7909
RH
2432 }
2433
2434 for (addr = start, len = end - start;
2435 len != 0;
2436 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2437 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2438
2439 /* If the write protection bit is set, then we invalidate
2440 the code inside. */
5fafdf24 2441 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2442 (flags & PAGE_WRITE) &&
2443 p->first_tb) {
d720b93d 2444 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2445 }
2446 p->flags = flags;
2447 }
33417e70
FB
2448}
2449
3d97b40b
TS
2450int page_check_range(target_ulong start, target_ulong len, int flags)
2451{
2452 PageDesc *p;
2453 target_ulong end;
2454 target_ulong addr;
2455
376a7909
RH
2456 /* This function should never be called with addresses outside the
2457 guest address space. If this assert fires, it probably indicates
2458 a missing call to h2g_valid. */
338e9e6c
BS
2459#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2460 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2461#endif
2462
3e0650a9
RH
2463 if (len == 0) {
2464 return 0;
2465 }
376a7909
RH
2466 if (start + len - 1 < start) {
2467 /* We've wrapped around. */
55f280c9 2468 return -1;
376a7909 2469 }
55f280c9 2470
3d97b40b
TS
2471 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2472 start = start & TARGET_PAGE_MASK;
2473
376a7909
RH
2474 for (addr = start, len = end - start;
2475 len != 0;
2476 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2477 p = page_find(addr >> TARGET_PAGE_BITS);
2478 if( !p )
2479 return -1;
2480 if( !(p->flags & PAGE_VALID) )
2481 return -1;
2482
dae3270c 2483 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2484 return -1;
dae3270c
FB
2485 if (flags & PAGE_WRITE) {
2486 if (!(p->flags & PAGE_WRITE_ORG))
2487 return -1;
2488 /* unprotect the page if it was put read-only because it
2489 contains translated code */
2490 if (!(p->flags & PAGE_WRITE)) {
2491 if (!page_unprotect(addr, 0, NULL))
2492 return -1;
2493 }
2494 return 0;
2495 }
3d97b40b
TS
2496 }
2497 return 0;
2498}
2499
9fa3e853 2500/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2501 page. Return TRUE if the fault was successfully handled. */
53a5960a 2502int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2503{
45d679d6
AJ
2504 unsigned int prot;
2505 PageDesc *p;
53a5960a 2506 target_ulong host_start, host_end, addr;
9fa3e853 2507
c8a706fe
PB
2508 /* Technically this isn't safe inside a signal handler. However we
2509 know this only ever happens in a synchronous SEGV handler, so in
2510 practice it seems to be ok. */
2511 mmap_lock();
2512
45d679d6
AJ
2513 p = page_find(address >> TARGET_PAGE_BITS);
2514 if (!p) {
c8a706fe 2515 mmap_unlock();
9fa3e853 2516 return 0;
c8a706fe 2517 }
45d679d6 2518
9fa3e853
FB
2519 /* if the page was really writable, then we change its
2520 protection back to writable */
45d679d6
AJ
2521 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2522 host_start = address & qemu_host_page_mask;
2523 host_end = host_start + qemu_host_page_size;
2524
2525 prot = 0;
2526 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2527 p = page_find(addr >> TARGET_PAGE_BITS);
2528 p->flags |= PAGE_WRITE;
2529 prot |= p->flags;
2530
9fa3e853
FB
2531 /* and since the content will be modified, we must invalidate
2532 the corresponding translated code. */
45d679d6 2533 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2534#ifdef DEBUG_TB_CHECK
45d679d6 2535 tb_invalidate_check(addr);
9fa3e853 2536#endif
9fa3e853 2537 }
45d679d6
AJ
2538 mprotect((void *)g2h(host_start), qemu_host_page_size,
2539 prot & PAGE_BITS);
2540
2541 mmap_unlock();
2542 return 1;
9fa3e853 2543 }
c8a706fe 2544 mmap_unlock();
9fa3e853
FB
2545 return 0;
2546}
2547
6a00d601
FB
2548static inline void tlb_set_dirty(CPUState *env,
2549 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2550{
2551}
9fa3e853
FB
2552#endif /* defined(CONFIG_USER_ONLY) */
2553
e2eef170 2554#if !defined(CONFIG_USER_ONLY)
8da3ff18 2555
c04b2b78
PB
2556#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2557typedef struct subpage_t {
70c68e44 2558 MemoryRegion iomem;
c04b2b78 2559 target_phys_addr_t base;
5312bd8b 2560 uint16_t sub_section[TARGET_PAGE_SIZE];
c04b2b78
PB
2561} subpage_t;
2562
c227f099 2563static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 2564 uint16_t section);
0f0cb164 2565static subpage_t *subpage_init(target_phys_addr_t base);
5312bd8b 2566static void destroy_page_desc(uint16_t section_index)
54688b1e 2567{
5312bd8b
AK
2568 MemoryRegionSection *section = &phys_sections[section_index];
2569 MemoryRegion *mr = section->mr;
54688b1e
AK
2570
2571 if (mr->subpage) {
2572 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2573 memory_region_destroy(&subpage->iomem);
2574 g_free(subpage);
2575 }
2576}
2577
4346ae3e 2578static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
54688b1e
AK
2579{
2580 unsigned i;
d6f2ea22 2581 PhysPageEntry *p;
54688b1e 2582
c19e8800 2583 if (lp->ptr == PHYS_MAP_NODE_NIL) {
54688b1e
AK
2584 return;
2585 }
2586
c19e8800 2587 p = phys_map_nodes[lp->ptr];
4346ae3e 2588 for (i = 0; i < L2_SIZE; ++i) {
07f07b31 2589 if (!p[i].is_leaf) {
54688b1e 2590 destroy_l2_mapping(&p[i], level - 1);
4346ae3e 2591 } else {
c19e8800 2592 destroy_page_desc(p[i].ptr);
54688b1e 2593 }
54688b1e 2594 }
07f07b31 2595 lp->is_leaf = 0;
c19e8800 2596 lp->ptr = PHYS_MAP_NODE_NIL;
54688b1e
AK
2597}
2598
2599static void destroy_all_mappings(void)
2600{
3eef53df 2601 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
d6f2ea22 2602 phys_map_nodes_reset();
54688b1e
AK
2603}
2604
5312bd8b
AK
2605static uint16_t phys_section_add(MemoryRegionSection *section)
2606{
2607 if (phys_sections_nb == phys_sections_nb_alloc) {
2608 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2609 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2610 phys_sections_nb_alloc);
2611 }
2612 phys_sections[phys_sections_nb] = *section;
2613 return phys_sections_nb++;
2614}
2615
2616static void phys_sections_clear(void)
2617{
2618 phys_sections_nb = 0;
2619}
2620
8f2498f9
MT
2621/* register physical memory.
2622 For RAM, 'size' must be a multiple of the target page size.
2623 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2624 io memory page. The address used when calling the IO function is
2625 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2626 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2627 before calculating this offset. This should not be a problem unless
2628 the low bits of start_addr and region_offset differ. */
0f0cb164
AK
2629static void register_subpage(MemoryRegionSection *section)
2630{
2631 subpage_t *subpage;
2632 target_phys_addr_t base = section->offset_within_address_space
2633 & TARGET_PAGE_MASK;
f3705d53 2634 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
0f0cb164
AK
2635 MemoryRegionSection subsection = {
2636 .offset_within_address_space = base,
2637 .size = TARGET_PAGE_SIZE,
2638 };
0f0cb164
AK
2639 target_phys_addr_t start, end;
2640
f3705d53 2641 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
0f0cb164 2642
f3705d53 2643 if (!(existing->mr->subpage)) {
0f0cb164
AK
2644 subpage = subpage_init(base);
2645 subsection.mr = &subpage->iomem;
2999097b
AK
2646 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2647 phys_section_add(&subsection));
0f0cb164 2648 } else {
f3705d53 2649 subpage = container_of(existing->mr, subpage_t, iomem);
0f0cb164
AK
2650 }
2651 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2652 end = start + section->size;
2653 subpage_register(subpage, start, end, phys_section_add(section));
2654}
2655
2656
2657static void register_multipage(MemoryRegionSection *section)
33417e70 2658{
dd81124b
AK
2659 target_phys_addr_t start_addr = section->offset_within_address_space;
2660 ram_addr_t size = section->size;
2999097b 2661 target_phys_addr_t addr;
5312bd8b 2662 uint16_t section_index = phys_section_add(section);
dd81124b 2663
3b8e6a2d 2664 assert(size);
f6f3fbca 2665
3b8e6a2d 2666 addr = start_addr;
2999097b
AK
2667 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2668 section_index);
33417e70
FB
2669}
2670
0f0cb164
AK
2671void cpu_register_physical_memory_log(MemoryRegionSection *section,
2672 bool readonly)
2673{
2674 MemoryRegionSection now = *section, remain = *section;
2675
2676 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2677 || (now.size < TARGET_PAGE_SIZE)) {
2678 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2679 - now.offset_within_address_space,
2680 now.size);
2681 register_subpage(&now);
2682 remain.size -= now.size;
2683 remain.offset_within_address_space += now.size;
2684 remain.offset_within_region += now.size;
2685 }
2686 now = remain;
2687 now.size &= TARGET_PAGE_MASK;
2688 if (now.size) {
2689 register_multipage(&now);
2690 remain.size -= now.size;
2691 remain.offset_within_address_space += now.size;
2692 remain.offset_within_region += now.size;
2693 }
2694 now = remain;
2695 if (now.size) {
2696 register_subpage(&now);
2697 }
2698}
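/* Worked example (illustrative, assuming 4 KB target pages): a section
 * covering [0x1800, 0x4800) is registered as a subpage for
 * [0x1800, 0x2000), a directly mapped full page at [0x2000, 0x4000),
 * and a trailing subpage for [0x4000, 0x4800). */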
2699
2700
c227f099 2701void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2702{
2703 if (kvm_enabled())
2704 kvm_coalesce_mmio_region(addr, size);
2705}
2706
c227f099 2707void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2708{
2709 if (kvm_enabled())
2710 kvm_uncoalesce_mmio_region(addr, size);
2711}
2712
62a2744c
SY
2713void qemu_flush_coalesced_mmio_buffer(void)
2714{
2715 if (kvm_enabled())
2716 kvm_flush_coalesced_mmio_buffer();
2717}
2718
c902760f
MT
2719#if defined(__linux__) && !defined(TARGET_S390X)
2720
2721#include <sys/vfs.h>
2722
2723#define HUGETLBFS_MAGIC 0x958458f6
2724
2725static long gethugepagesize(const char *path)
2726{
2727 struct statfs fs;
2728 int ret;
2729
2730 do {
9742bf26 2731 ret = statfs(path, &fs);
c902760f
MT
2732 } while (ret != 0 && errno == EINTR);
2733
2734 if (ret != 0) {
9742bf26
YT
2735 perror(path);
2736 return 0;
c902760f
MT
2737 }
2738
2739 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2740 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2741
2742 return fs.f_bsize;
2743}
2744
04b16653
AW
2745static void *file_ram_alloc(RAMBlock *block,
2746 ram_addr_t memory,
2747 const char *path)
c902760f
MT
2748{
2749 char *filename;
2750 void *area;
2751 int fd;
2752#ifdef MAP_POPULATE
2753 int flags;
2754#endif
2755 unsigned long hpagesize;
2756
2757 hpagesize = gethugepagesize(path);
2758 if (!hpagesize) {
9742bf26 2759 return NULL;
c902760f
MT
2760 }
2761
2762 if (memory < hpagesize) {
2763 return NULL;
2764 }
2765
2766 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2767 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2768 return NULL;
2769 }
2770
2771 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2772 return NULL;
c902760f
MT
2773 }
2774
2775 fd = mkstemp(filename);
2776 if (fd < 0) {
9742bf26
YT
2777 perror("unable to create backing store for hugepages");
2778 free(filename);
2779 return NULL;
c902760f
MT
2780 }
2781 unlink(filename);
2782 free(filename);
2783
2784 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2785
2786 /*
2787 * ftruncate is not supported by hugetlbfs in older
2788 * hosts, so don't bother bailing out on errors.
2789 * If anything goes wrong with it under other filesystems,
2790 * mmap will fail.
2791 */
2792 if (ftruncate(fd, memory))
9742bf26 2793 perror("ftruncate");
c902760f
MT
2794
2795#ifdef MAP_POPULATE
2796 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2797 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2798 * to sidestep this quirk.
2799 */
2800 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2801 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2802#else
2803 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2804#endif
2805 if (area == MAP_FAILED) {
9742bf26
YT
2806 perror("file_ram_alloc: can't mmap RAM pages");
2807 close(fd);
2808 return (NULL);
c902760f 2809 }
04b16653 2810 block->fd = fd;
c902760f
MT
2811 return area;
2812}
2813#endif
2814
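/* Pick an offset for a new RAM block: scan the gaps between existing
 * blocks and choose the smallest gap that still fits 'size' (best fit),
 * aborting if no gap is large enough. */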
d17b5288 2815static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2816{
2817 RAMBlock *block, *next_block;
3e837b2c 2818 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2819
2820 if (QLIST_EMPTY(&ram_list.blocks))
2821 return 0;
2822
2823 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2824 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2825
2826 end = block->offset + block->length;
2827
2828 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2829 if (next_block->offset >= end) {
2830 next = MIN(next, next_block->offset);
2831 }
2832 }
2833 if (next - end >= size && next - end < mingap) {
3e837b2c 2834 offset = end;
04b16653
AW
2835 mingap = next - end;
2836 }
2837 }
3e837b2c
AW
2838
2839 if (offset == RAM_ADDR_MAX) {
2840 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2841 (uint64_t)size);
2842 abort();
2843 }
2844
04b16653
AW
2845 return offset;
2846}
2847
2848static ram_addr_t last_ram_offset(void)
d17b5288
AW
2849{
2850 RAMBlock *block;
2851 ram_addr_t last = 0;
2852
2853 QLIST_FOREACH(block, &ram_list.blocks, next)
2854 last = MAX(last, block->offset + block->length);
2855
2856 return last;
2857}
2858
c5705a77 2859void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2860{
2861 RAMBlock *new_block, *block;
2862
c5705a77
AK
2863 new_block = NULL;
2864 QLIST_FOREACH(block, &ram_list.blocks, next) {
2865 if (block->offset == addr) {
2866 new_block = block;
2867 break;
2868 }
2869 }
2870 assert(new_block);
2871 assert(!new_block->idstr[0]);
84b89d78
CM
2872
2873 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2874 char *id = dev->parent_bus->info->get_dev_path(dev);
2875 if (id) {
2876 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2877 g_free(id);
84b89d78
CM
2878 }
2879 }
2880 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2881
2882 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2883 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2884 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2885 new_block->idstr);
2886 abort();
2887 }
2888 }
c5705a77
AK
2889}
2890
2891ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2892 MemoryRegion *mr)
2893{
2894 RAMBlock *new_block;
2895
2896 size = TARGET_PAGE_ALIGN(size);
2897 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2898
7c637366 2899 new_block->mr = mr;
432d268c 2900 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2901 if (host) {
2902 new_block->host = host;
cd19cfa2 2903 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2904 } else {
2905 if (mem_path) {
c902760f 2906#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2907 new_block->host = file_ram_alloc(new_block, size, mem_path);
2908 if (!new_block->host) {
2909 new_block->host = qemu_vmalloc(size);
e78815a5 2910 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2911 }
c902760f 2912#else
6977dfe6
YT
2913 fprintf(stderr, "-mem-path option unsupported\n");
2914 exit(1);
c902760f 2915#endif
6977dfe6 2916 } else {
6b02494d 2917#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2918 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2919 a system-defined value, which is at least 256GB. Larger systems
2920 have larger values. We put the guest between the end of data
2921 segment (system break) and this value. We use 32GB as a base to
2922 have enough room for the system break to grow. */
2923 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2924 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2925 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2926 if (new_block->host == MAP_FAILED) {
2927 fprintf(stderr, "Allocating RAM failed\n");
2928 abort();
2929 }
6b02494d 2930#else
868bb33f 2931 if (xen_enabled()) {
fce537d4 2932 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2933 } else {
2934 new_block->host = qemu_vmalloc(size);
2935 }
6b02494d 2936#endif
e78815a5 2937 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2938 }
c902760f 2939 }
94a6b54f
PB
2940 new_block->length = size;
2941
f471a17e 2942 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2943
7267c094 2944 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2945 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2946 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2947 0xff, size >> TARGET_PAGE_BITS);
2948
6f0437e8
JK
2949 if (kvm_enabled())
2950 kvm_setup_guest_memory(new_block->host, size);
2951
94a6b54f
PB
2952 return new_block->offset;
2953}
e9a1ab19 2954
c5705a77 2955ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2956{
c5705a77 2957 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2958}
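/* Illustrative call (hypothetical caller): a device model that owns a
 * MemoryRegion 'mr' could back it with 1 MB of guest RAM via
 *
 *     ram_addr_t offset = qemu_ram_alloc(1024 * 1024, mr);
 *
 * In practice this is usually reached through the memory API rather
 * than called directly. */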
2959
1f2e98b6
AW
2960void qemu_ram_free_from_ptr(ram_addr_t addr)
2961{
2962 RAMBlock *block;
2963
2964 QLIST_FOREACH(block, &ram_list.blocks, next) {
2965 if (addr == block->offset) {
2966 QLIST_REMOVE(block, next);
7267c094 2967 g_free(block);
1f2e98b6
AW
2968 return;
2969 }
2970 }
2971}
2972
c227f099 2973void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2974{
04b16653
AW
2975 RAMBlock *block;
2976
2977 QLIST_FOREACH(block, &ram_list.blocks, next) {
2978 if (addr == block->offset) {
2979 QLIST_REMOVE(block, next);
cd19cfa2
HY
2980 if (block->flags & RAM_PREALLOC_MASK) {
2981 ;
2982 } else if (mem_path) {
04b16653
AW
2983#if defined (__linux__) && !defined(TARGET_S390X)
2984 if (block->fd) {
2985 munmap(block->host, block->length);
2986 close(block->fd);
2987 } else {
2988 qemu_vfree(block->host);
2989 }
fd28aa13
JK
2990#else
2991 abort();
04b16653
AW
2992#endif
2993 } else {
2994#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2995 munmap(block->host, block->length);
2996#else
868bb33f 2997 if (xen_enabled()) {
e41d7c69 2998 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2999 } else {
3000 qemu_vfree(block->host);
3001 }
04b16653
AW
3002#endif
3003 }
7267c094 3004 g_free(block);
04b16653
AW
3005 return;
3006 }
3007 }
3008
e9a1ab19
FB
3009}
3010
cd19cfa2
HY
3011#ifndef _WIN32
3012void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3013{
3014 RAMBlock *block;
3015 ram_addr_t offset;
3016 int flags;
3017 void *area, *vaddr;
3018
3019 QLIST_FOREACH(block, &ram_list.blocks, next) {
3020 offset = addr - block->offset;
3021 if (offset < block->length) {
3022 vaddr = block->host + offset;
3023 if (block->flags & RAM_PREALLOC_MASK) {
3024 ;
3025 } else {
3026 flags = MAP_FIXED;
3027 munmap(vaddr, length);
3028 if (mem_path) {
3029#if defined(__linux__) && !defined(TARGET_S390X)
3030 if (block->fd) {
3031#ifdef MAP_POPULATE
3032 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3033 MAP_PRIVATE;
3034#else
3035 flags |= MAP_PRIVATE;
3036#endif
3037 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3038 flags, block->fd, offset);
3039 } else {
3040 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3041 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3042 flags, -1, 0);
3043 }
fd28aa13
JK
3044#else
3045 abort();
cd19cfa2
HY
3046#endif
3047 } else {
3048#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3049 flags |= MAP_SHARED | MAP_ANONYMOUS;
3050 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3051 flags, -1, 0);
3052#else
3053 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3054 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3055 flags, -1, 0);
3056#endif
3057 }
3058 if (area != vaddr) {
f15fbc4b
AP
3059 fprintf(stderr, "Could not remap addr: "
3060 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3061 length, addr);
3062 exit(1);
3063 }
3064 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3065 }
3066 return;
3067 }
3068 }
3069}
3070#endif /* !_WIN32 */
3071
dc828ca1 3072/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3073 With the exception of the softmmu code in this file, this should
3074 only be used for local memory (e.g. video ram) that the device owns,
3075 and knows it isn't going to access beyond the end of the block.
3076
3077 It should not be used for general purpose DMA.
3078 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3079 */
c227f099 3080void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3081{
94a6b54f
PB
3082 RAMBlock *block;
3083
f471a17e
AW
3084 QLIST_FOREACH(block, &ram_list.blocks, next) {
3085 if (addr - block->offset < block->length) {
7d82af38
VP
3086 /* Move this entry to the start of the list. */
3087 if (block != QLIST_FIRST(&ram_list.blocks)) {
3088 QLIST_REMOVE(block, next);
3089 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3090 }
868bb33f 3091 if (xen_enabled()) {
432d268c
JN
3092 /* We need to check if the requested address is in the RAM
3093 * because we don't want to map the entire memory in QEMU.
712c2b41 3094 * In that case just map until the end of the page.
432d268c
JN
3095 */
3096 if (block->offset == 0) {
e41d7c69 3097 return xen_map_cache(addr, 0, 0);
432d268c 3098 } else if (block->host == NULL) {
e41d7c69
JK
3099 block->host =
3100 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3101 }
3102 }
f471a17e
AW
3103 return block->host + (addr - block->offset);
3104 }
94a6b54f 3105 }
f471a17e
AW
3106
3107 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3108 abort();
3109
3110 return NULL;
dc828ca1
PB
3111}
3112
b2e0a138
MT
3113/* Return a host pointer to ram allocated with qemu_ram_alloc.
3114 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3115 */
3116void *qemu_safe_ram_ptr(ram_addr_t addr)
3117{
3118 RAMBlock *block;
3119
3120 QLIST_FOREACH(block, &ram_list.blocks, next) {
3121 if (addr - block->offset < block->length) {
868bb33f 3122 if (xen_enabled()) {
432d268c
JN
3123 /* We need to check if the requested address is in the RAM
3124 * because we don't want to map the entire memory in QEMU.
712c2b41 3125 * In that case just map until the end of the page.
432d268c
JN
3126 */
3127 if (block->offset == 0) {
e41d7c69 3128 return xen_map_cache(addr, 0, 0);
432d268c 3129 } else if (block->host == NULL) {
e41d7c69
JK
3130 block->host =
3131 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3132 }
3133 }
b2e0a138
MT
3134 return block->host + (addr - block->offset);
3135 }
3136 }
3137
3138 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3139 abort();
3140
3141 return NULL;
3142}
3143
38bee5dc
SS
3144/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3145 * but takes a size argument */
8ab934f9 3146void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3147{
8ab934f9
SS
3148 if (*size == 0) {
3149 return NULL;
3150 }
868bb33f 3151 if (xen_enabled()) {
e41d7c69 3152 return xen_map_cache(addr, *size, 1);
868bb33f 3153 } else {
38bee5dc
SS
3154 RAMBlock *block;
3155
3156 QLIST_FOREACH(block, &ram_list.blocks, next) {
3157 if (addr - block->offset < block->length) {
3158 if (addr - block->offset + *size > block->length)
3159 *size = block->length - addr + block->offset;
3160 return block->host + (addr - block->offset);
3161 }
3162 }
3163
3164 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3165 abort();
38bee5dc
SS
3166 }
3167}
3168
050a0ddf
AP
3169void qemu_put_ram_ptr(void *addr)
3170{
3171 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3172}
3173
e890261f 3174int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3175{
94a6b54f
PB
3176 RAMBlock *block;
3177 uint8_t *host = ptr;
3178
868bb33f 3179 if (xen_enabled()) {
e41d7c69 3180 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3181 return 0;
3182 }
3183
f471a17e 3184 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3185 /* This case happens when the block is not mapped. */
3186 if (block->host == NULL) {
3187 continue;
3188 }
f471a17e 3189 if (host - block->host < block->length) {
e890261f
MT
3190 *ram_addr = block->offset + (host - block->host);
3191 return 0;
f471a17e 3192 }
94a6b54f 3193 }
432d268c 3194
e890261f
MT
3195 return -1;
3196}
f471a17e 3197
e890261f
MT
3198/* Some of the softmmu routines need to translate from a host pointer
3199 (typically a TLB entry) back to a ram offset. */
3200ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3201{
3202 ram_addr_t ram_addr;
f471a17e 3203
e890261f
MT
3204 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3205 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3206 abort();
3207 }
3208 return ram_addr;
5579c7f3
PB
3209}
3210
0e0df1e2
AK
3211static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3212 unsigned size)
e18231a3
BS
3213{
3214#ifdef DEBUG_UNASSIGNED
3215 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3216#endif
5b450407 3217#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3218 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3219#endif
3220 return 0;
3221}
3222
0e0df1e2
AK
3223static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3224 uint64_t val, unsigned size)
e18231a3
BS
3225{
3226#ifdef DEBUG_UNASSIGNED
0e0df1e2 3227 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3228#endif
5b450407 3229#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3230 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3231#endif
33417e70
FB
3232}
3233
0e0df1e2
AK
3234static const MemoryRegionOps unassigned_mem_ops = {
3235 .read = unassigned_mem_read,
3236 .write = unassigned_mem_write,
3237 .endianness = DEVICE_NATIVE_ENDIAN,
3238};
e18231a3 3239
0e0df1e2
AK
3240static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3241 unsigned size)
e18231a3 3242{
0e0df1e2 3243 abort();
e18231a3
BS
3244}
3245
0e0df1e2
AK
3246static void error_mem_write(void *opaque, target_phys_addr_t addr,
3247 uint64_t value, unsigned size)
e18231a3 3248{
0e0df1e2 3249 abort();
33417e70
FB
3250}
3251
0e0df1e2
AK
3252static const MemoryRegionOps error_mem_ops = {
3253 .read = error_mem_read,
3254 .write = error_mem_write,
3255 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3256};
3257
0e0df1e2
AK
3258static const MemoryRegionOps rom_mem_ops = {
3259 .read = error_mem_read,
3260 .write = unassigned_mem_write,
3261 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3262};
3263
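/* Dirty-tracking write path: pages whose TLB entry carries TLB_NOTDIRTY
 * are routed here so that the first write can invalidate any translated
 * code on the page and update the dirty bitmap before the store is
 * performed on the underlying RAM. */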
0e0df1e2
AK
3264static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3265 uint64_t val, unsigned size)
9fa3e853 3266{
3a7d929e 3267 int dirty_flags;
f7c11b53 3268 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3269 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3270#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3271 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3272 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3273#endif
3a7d929e 3274 }
0e0df1e2
AK
3275 switch (size) {
3276 case 1:
3277 stb_p(qemu_get_ram_ptr(ram_addr), val);
3278 break;
3279 case 2:
3280 stw_p(qemu_get_ram_ptr(ram_addr), val);
3281 break;
3282 case 4:
3283 stl_p(qemu_get_ram_ptr(ram_addr), val);
3284 break;
3285 default:
3286 abort();
3a7d929e 3287 }
f23db169 3288 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3289 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3290 /* we remove the notdirty callback only if the code has been
3291 flushed */
3292 if (dirty_flags == 0xff)
2e70f6ef 3293 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3294}
3295
0e0df1e2
AK
3296static const MemoryRegionOps notdirty_mem_ops = {
3297 .read = error_mem_read,
3298 .write = notdirty_mem_write,
3299 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3300};
3301
0f459d16 3302/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3303static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3304{
3305 CPUState *env = cpu_single_env;
06d55cc1
AL
3306 target_ulong pc, cs_base;
3307 TranslationBlock *tb;
0f459d16 3308 target_ulong vaddr;
a1d1bb31 3309 CPUWatchpoint *wp;
06d55cc1 3310 int cpu_flags;
0f459d16 3311
06d55cc1
AL
3312 if (env->watchpoint_hit) {
3313 /* We re-entered the check after replacing the TB. Now raise
3314 * the debug interrupt so that it will trigger after the
3315 * current instruction. */
3316 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3317 return;
3318 }
2e70f6ef 3319 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3320 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3321 if ((vaddr == (wp->vaddr & len_mask) ||
3322 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3323 wp->flags |= BP_WATCHPOINT_HIT;
3324 if (!env->watchpoint_hit) {
3325 env->watchpoint_hit = wp;
3326 tb = tb_find_pc(env->mem_io_pc);
3327 if (!tb) {
3328 cpu_abort(env, "check_watchpoint: could not find TB for "
3329 "pc=%p", (void *)env->mem_io_pc);
3330 }
618ba8e6 3331 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3332 tb_phys_invalidate(tb, -1);
3333 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3334 env->exception_index = EXCP_DEBUG;
488d6577 3335 cpu_loop_exit(env);
6e140f28
AL
3336 } else {
3337 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3338 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
488d6577 3339 cpu_resume_from_signal(env, NULL);
6e140f28 3340 }
06d55cc1 3341 }
6e140f28
AL
3342 } else {
3343 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3344 }
3345 }
3346}
3347
6658ffb8
PB
3348/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3349 so these check for a hit then pass through to the normal out-of-line
3350 phys routines. */
1ec9b909
AK
3351static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
3352 unsigned size)
6658ffb8 3353{
1ec9b909
AK
3354 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
3355 switch (size) {
3356 case 1: return ldub_phys(addr);
3357 case 2: return lduw_phys(addr);
3358 case 4: return ldl_phys(addr);
3359 default: abort();
3360 }
6658ffb8
PB
3361}
3362
1ec9b909
AK
3363static void watch_mem_write(void *opaque, target_phys_addr_t addr,
3364 uint64_t val, unsigned size)
6658ffb8 3365{
1ec9b909
AK
3366 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
3367 switch (size) {
67364150
MF
3368 case 1:
3369 stb_phys(addr, val);
3370 break;
3371 case 2:
3372 stw_phys(addr, val);
3373 break;
3374 case 4:
3375 stl_phys(addr, val);
3376 break;
1ec9b909
AK
3377 default: abort();
3378 }
6658ffb8
PB
3379}
3380
1ec9b909
AK
3381static const MemoryRegionOps watch_mem_ops = {
3382 .read = watch_mem_read,
3383 .write = watch_mem_write,
3384 .endianness = DEVICE_NATIVE_ENDIAN,
6658ffb8 3385};
6658ffb8 3386
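/* Subpage dispatch: when several MemoryRegionSections share a single
 * target page, that page is backed by a subpage_t whose sub_section[]
 * table maps each offset to the section that owns it; the read and
 * write handlers below simply forward to the owning MemoryRegion. */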
70c68e44
AK
3387static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
3388 unsigned len)
db7b5426 3389{
70c68e44 3390 subpage_t *mmio = opaque;
f6405247 3391 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3392 MemoryRegionSection *section;
db7b5426
BS
3393#if defined(DEBUG_SUBPAGE)
3394 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3395 mmio, len, addr, idx);
3396#endif
db7b5426 3397
5312bd8b
AK
3398 section = &phys_sections[mmio->sub_section[idx]];
3399 addr += mmio->base;
3400 addr -= section->offset_within_address_space;
3401 addr += section->offset_within_region;
37ec01d4 3402 return io_mem_read(section->mr, addr, len);
db7b5426
BS
3403}
3404
70c68e44
AK
3405static void subpage_write(void *opaque, target_phys_addr_t addr,
3406 uint64_t value, unsigned len)
db7b5426 3407{
70c68e44 3408 subpage_t *mmio = opaque;
f6405247 3409 unsigned int idx = SUBPAGE_IDX(addr);
5312bd8b 3410 MemoryRegionSection *section;
db7b5426 3411#if defined(DEBUG_SUBPAGE)
70c68e44
AK
3412 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3413 " idx %d value %"PRIx64"\n",
f6405247 3414 __func__, mmio, len, addr, idx, value);
db7b5426 3415#endif
f6405247 3416
5312bd8b
AK
3417 section = &phys_sections[mmio->sub_section[idx]];
3418 addr += mmio->base;
3419 addr -= section->offset_within_address_space;
3420 addr += section->offset_within_region;
37ec01d4 3421 io_mem_write(section->mr, addr, value, len);
db7b5426
BS
3422}
3423
70c68e44
AK
3424static const MemoryRegionOps subpage_ops = {
3425 .read = subpage_read,
3426 .write = subpage_write,
3427 .endianness = DEVICE_NATIVE_ENDIAN,
db7b5426
BS
3428};
3429
de712f94
AK
3430static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3431 unsigned size)
56384e8b
AF
3432{
3433 ram_addr_t raddr = addr;
3434 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3435 switch (size) {
3436 case 1: return ldub_p(ptr);
3437 case 2: return lduw_p(ptr);
3438 case 4: return ldl_p(ptr);
3439 default: abort();
3440 }
56384e8b
AF
3441}
3442
de712f94
AK
3443static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3444 uint64_t value, unsigned size)
56384e8b
AF
3445{
3446 ram_addr_t raddr = addr;
3447 void *ptr = qemu_get_ram_ptr(raddr);
de712f94
AK
3448 switch (size) {
3449 case 1: return stb_p(ptr, value);
3450 case 2: return stw_p(ptr, value);
3451 case 4: return stl_p(ptr, value);
3452 default: abort();
3453 }
56384e8b
AF
3454}
3455
de712f94
AK
3456static const MemoryRegionOps subpage_ram_ops = {
3457 .read = subpage_ram_read,
3458 .write = subpage_ram_write,
3459 .endianness = DEVICE_NATIVE_ENDIAN,
56384e8b
AF
3460};
3461
c227f099 3462static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
5312bd8b 3463 uint16_t section)
db7b5426
BS
3464{
3465 int idx, eidx;
3466
3467 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3468 return -1;
3469 idx = SUBPAGE_IDX(start);
3470 eidx = SUBPAGE_IDX(end);
3471#if defined(DEBUG_SUBPAGE)
0bf9e31a 3472 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
db7b5426
BS
3473 mmio, start, end, idx, eidx, section);
3474#endif
5312bd8b
AK
3475 if (memory_region_is_ram(phys_sections[section].mr)) {
3476 MemoryRegionSection new_section = phys_sections[section];
3477 new_section.mr = &io_mem_subpage_ram;
3478 section = phys_section_add(&new_section);
56384e8b 3479 }
db7b5426 3480 for (; idx <= eidx; idx++) {
5312bd8b 3481 mmio->sub_section[idx] = section;
db7b5426
BS
3482 }
3483
3484 return 0;
3485}
3486
0f0cb164 3487static subpage_t *subpage_init(target_phys_addr_t base)
db7b5426 3488{
c227f099 3489 subpage_t *mmio;
db7b5426 3490
7267c094 3491 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3492
3493 mmio->base = base;
70c68e44
AK
3494 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3495 "subpage", TARGET_PAGE_SIZE);
b3b00c78 3496 mmio->iomem.subpage = true;
db7b5426 3497#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3498 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3499 mmio, base, TARGET_PAGE_SIZE);
db7b5426 3500#endif
0f0cb164 3501 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
db7b5426
BS
3502
3503 return mmio;
3504}
3505
88715657
AL
3506static int get_free_io_mem_idx(void)
3507{
3508 int i;
3509
3510 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3511 if (!io_mem_used[i]) {
3512 io_mem_used[i] = 1;
3513 return i;
3514 }
c6703b47 3515 fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3516 return -1;
3517}
3518
33417e70
FB
3519/* Register a MemoryRegion in the io_mem dispatch table; the region's
3520 ops supply the handlers used for byte, word and dword
0b4e6e3e 3521 accesses.
3ee89922 3522 If io_index is non-zero, the corresponding I/O slot is
4254fab8
BS
3523 modified. If it is zero, a new I/O slot is allocated. The return
3524 value can be used with cpu_register_physical_memory(); -1 is
3525 returned on error. */
a621f38d 3526static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
33417e70 3527{
33417e70 3528 if (io_index <= 0) {
88715657
AL
3529 io_index = get_free_io_mem_idx();
3530 if (io_index == -1)
3531 return io_index;
33417e70
FB
3532 } else {
3533 if (io_index >= IO_MEM_NB_ENTRIES)
3534 return -1;
3535 }
b5ff1b31 3536
a621f38d 3537 io_mem_region[io_index] = mr;
f6405247 3538
11c7ef0c 3539 return io_index;
33417e70 3540}
61382a50 3541
a621f38d 3542int cpu_register_io_memory(MemoryRegion *mr)
1eed09cb 3543{
a621f38d 3544 return cpu_register_io_memory_fixed(0, mr);
1eed09cb
AK
3545}
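
/* Illustrative sketch, not part of exec.c: one way a hypothetical device
 * could obtain an io_mem slot through cpu_register_io_memory().  The ops,
 * names and size below are made up for the example. */
static uint64_t mydev_read(void *opaque, target_phys_addr_t addr,
                           unsigned size)
{
    /* return the register value at offset 'addr' */
    return 0;
}

static void mydev_write(void *opaque, target_phys_addr_t addr,
                        uint64_t val, unsigned size)
{
    /* update device state for the register at offset 'addr' */
}

static const MemoryRegionOps mydev_ops = {
    .read = mydev_read,
    .write = mydev_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void mydev_setup(MemoryRegion *iomem, void *dev_state)
{
    int io_index;

    memory_region_init_io(iomem, &mydev_ops, dev_state, "mydev", 0x1000);
    io_index = cpu_register_io_memory(iomem);
    if (io_index < 0) {
        fprintf(stderr, "mydev: no free io_mem slot\n");
    }
}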
3546
11c7ef0c 3547void cpu_unregister_io_memory(int io_index)
88715657 3548{
a621f38d 3549 io_mem_region[io_index] = NULL;
88715657
AL
3550 io_mem_used[io_index] = 0;
3551}
3552
5312bd8b
AK
3553static uint16_t dummy_section(MemoryRegion *mr)
3554{
3555 MemoryRegionSection section = {
3556 .mr = mr,
3557 .offset_within_address_space = 0,
3558 .offset_within_region = 0,
3559 .size = UINT64_MAX,
3560 };
3561
3562 return phys_section_add(&section);
3563}
3564
37ec01d4 3565MemoryRegion *iotlb_to_region(target_phys_addr_t index)
aa102231 3566{
37ec01d4 3567 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
aa102231
AK
3568}
3569
e9179ce1
AK
3570static void io_mem_init(void)
3571{
3572 int i;
3573
0e0df1e2
AK
3574 /* Must be first: */
3575 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3576 assert(io_mem_ram.ram_addr == 0);
3577 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3578 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3579 "unassigned", UINT64_MAX);
3580 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3581 "notdirty", UINT64_MAX);
de712f94
AK
3582 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3583 "subpage-ram", UINT64_MAX);
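    /* The five fixed regions initialized above (ram, rom, unassigned,
       notdirty, subpage-ram) occupy the first io_mem slots; mark them
       as used so get_free_io_mem_idx() never hands them out. */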
e9179ce1
AK
3584 for (i = 0; i < 5; i++)
3585 io_mem_used[i] = 1;
3586
1ec9b909
AK
3587 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3588 "watch", UINT64_MAX);
e9179ce1
AK
3589}
3590
50c1e149
AK
3591static void core_begin(MemoryListener *listener)
3592{
54688b1e 3593 destroy_all_mappings();
5312bd8b 3594 phys_sections_clear();
c19e8800 3595 phys_map.ptr = PHYS_MAP_NODE_NIL;
5312bd8b 3596 phys_section_unassigned = dummy_section(&io_mem_unassigned);
aa102231
AK
3597 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3598 phys_section_rom = dummy_section(&io_mem_rom);
3599 phys_section_watch = dummy_section(&io_mem_watch);
50c1e149
AK
3600}
3601
3602static void core_commit(MemoryListener *listener)
3603{
117712c3
AK
3604 CPUState *env;
3605
3606 /* since each CPU stores ram addresses in its TLB cache, we must
3607 reset the modified entries */
3608 /* XXX: slow ! */
3609 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3610 tlb_flush(env, 1);
3611 }
50c1e149
AK
3612}
3613
93632747
AK
3614static void core_region_add(MemoryListener *listener,
3615 MemoryRegionSection *section)
3616{
4855d41a 3617 cpu_register_physical_memory_log(section, section->readonly);
93632747
AK
3618}
3619
3620static void core_region_del(MemoryListener *listener,
3621 MemoryRegionSection *section)
3622{
93632747
AK
3623}
3624
50c1e149
AK
3625static void core_region_nop(MemoryListener *listener,
3626 MemoryRegionSection *section)
3627{
54688b1e 3628 cpu_register_physical_memory_log(section, section->readonly);
50c1e149
AK
3629}
3630
93632747
AK
3631static void core_log_start(MemoryListener *listener,
3632 MemoryRegionSection *section)
3633{
3634}
3635
3636static void core_log_stop(MemoryListener *listener,
3637 MemoryRegionSection *section)
3638{
3639}
3640
3641static void core_log_sync(MemoryListener *listener,
3642 MemoryRegionSection *section)
3643{
3644}
3645
3646static void core_log_global_start(MemoryListener *listener)
3647{
3648 cpu_physical_memory_set_dirty_tracking(1);
3649}
3650
3651static void core_log_global_stop(MemoryListener *listener)
3652{
3653 cpu_physical_memory_set_dirty_tracking(0);
3654}
3655
3656static void core_eventfd_add(MemoryListener *listener,
3657 MemoryRegionSection *section,
3658 bool match_data, uint64_t data, int fd)
3659{
3660}
3661
3662static void core_eventfd_del(MemoryListener *listener,
3663 MemoryRegionSection *section,
3664 bool match_data, uint64_t data, int fd)
3665{
3666}
3667
50c1e149
AK
3668static void io_begin(MemoryListener *listener)
3669{
3670}
3671
3672static void io_commit(MemoryListener *listener)
3673{
3674}
3675
4855d41a
AK
3676static void io_region_add(MemoryListener *listener,
3677 MemoryRegionSection *section)
3678{
a2d33521
AK
3679 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3680
3681 mrio->mr = section->mr;
3682 mrio->offset = section->offset_within_region;
3683 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
4855d41a 3684 section->offset_within_address_space, section->size);
a2d33521 3685 ioport_register(&mrio->iorange);
4855d41a
AK
3686}
3687
3688static void io_region_del(MemoryListener *listener,
3689 MemoryRegionSection *section)
3690{
3691 isa_unassign_ioport(section->offset_within_address_space, section->size);
3692}
3693
50c1e149
AK
3694static void io_region_nop(MemoryListener *listener,
3695 MemoryRegionSection *section)
3696{
3697}
3698
4855d41a
AK
3699static void io_log_start(MemoryListener *listener,
3700 MemoryRegionSection *section)
3701{
3702}
3703
3704static void io_log_stop(MemoryListener *listener,
3705 MemoryRegionSection *section)
3706{
3707}
3708
3709static void io_log_sync(MemoryListener *listener,
3710 MemoryRegionSection *section)
3711{
3712}
3713
3714static void io_log_global_start(MemoryListener *listener)
3715{
3716}
3717
3718static void io_log_global_stop(MemoryListener *listener)
3719{
3720}
3721
3722static void io_eventfd_add(MemoryListener *listener,
3723 MemoryRegionSection *section,
3724 bool match_data, uint64_t data, int fd)
3725{
3726}
3727
3728static void io_eventfd_del(MemoryListener *listener,
3729 MemoryRegionSection *section,
3730 bool match_data, uint64_t data, int fd)
3731{
3732}
3733
93632747 3734static MemoryListener core_memory_listener = {
50c1e149
AK
3735 .begin = core_begin,
3736 .commit = core_commit,
93632747
AK
3737 .region_add = core_region_add,
3738 .region_del = core_region_del,
50c1e149 3739 .region_nop = core_region_nop,
93632747
AK
3740 .log_start = core_log_start,
3741 .log_stop = core_log_stop,
3742 .log_sync = core_log_sync,
3743 .log_global_start = core_log_global_start,
3744 .log_global_stop = core_log_global_stop,
3745 .eventfd_add = core_eventfd_add,
3746 .eventfd_del = core_eventfd_del,
3747 .priority = 0,
3748};
3749
4855d41a 3750static MemoryListener io_memory_listener = {
50c1e149
AK
3751 .begin = io_begin,
3752 .commit = io_commit,
4855d41a
AK
3753 .region_add = io_region_add,
3754 .region_del = io_region_del,
50c1e149 3755 .region_nop = io_region_nop,
4855d41a
AK
3756 .log_start = io_log_start,
3757 .log_stop = io_log_stop,
3758 .log_sync = io_log_sync,
3759 .log_global_start = io_log_global_start,
3760 .log_global_stop = io_log_global_stop,
3761 .eventfd_add = io_eventfd_add,
3762 .eventfd_del = io_eventfd_del,
3763 .priority = 0,
3764};
3765
62152b8a
AK
3766static void memory_map_init(void)
3767{
7267c094 3768 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3769 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3770 set_system_memory_map(system_memory);
309cb471 3771
7267c094 3772 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3773 memory_region_init(system_io, "io", 65536);
3774 set_system_io_map(system_io);
93632747 3775
4855d41a
AK
3776 memory_listener_register(&core_memory_listener, system_memory);
3777 memory_listener_register(&io_memory_listener, system_io);
62152b8a
AK
3778}
3779
3780MemoryRegion *get_system_memory(void)
3781{
3782 return system_memory;
3783}
3784
309cb471
AK
3785MemoryRegion *get_system_io(void)
3786{
3787 return system_io;
3788}
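
/* Illustrative sketch, not part of exec.c: board code plugs regions into the
 * address spaces returned by get_system_memory()/get_system_io().  The name
 * and size are made up, and this assumes the three-argument form of
 * memory_region_init_ram() used by this generation of the memory API. */
static void example_board_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_malloc(sizeof(*ram));

    /* allocate 128MB of guest RAM and map it at guest physical address 0 */
    memory_region_init_ram(ram, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(sysmem, 0, ram);
}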
3789
e2eef170
PB
3790#endif /* !defined(CONFIG_USER_ONLY) */
3791
13eb76e0
FB
3792/* physical memory access (slow version, mainly for debug) */
3793#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3794int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3795 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3796{
3797 int l, flags;
3798 target_ulong page;
53a5960a 3799 void * p;
13eb76e0
FB
3800
3801 while (len > 0) {
3802 page = addr & TARGET_PAGE_MASK;
3803 l = (page + TARGET_PAGE_SIZE) - addr;
3804 if (l > len)
3805 l = len;
3806 flags = page_get_flags(page);
3807 if (!(flags & PAGE_VALID))
a68fe89c 3808 return -1;
13eb76e0
FB
3809 if (is_write) {
3810 if (!(flags & PAGE_WRITE))
a68fe89c 3811 return -1;
579a97f7 3812 /* XXX: this code should not depend on lock_user */
72fb7daa 3813 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3814 return -1;
72fb7daa
AJ
3815 memcpy(p, buf, l);
3816 unlock_user(p, addr, l);
13eb76e0
FB
3817 } else {
3818 if (!(flags & PAGE_READ))
a68fe89c 3819 return -1;
579a97f7 3820 /* XXX: this code should not depend on lock_user */
72fb7daa 3821 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3822 return -1;
72fb7daa 3823 memcpy(buf, p, l);
5b257578 3824 unlock_user(p, addr, 0);
13eb76e0
FB
3825 }
3826 len -= l;
3827 buf += l;
3828 addr += l;
3829 }
a68fe89c 3830 return 0;
13eb76e0 3831}
8df1cd07 3832
13eb76e0 3833#else
c227f099 3834void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3835 int len, int is_write)
3836{
37ec01d4 3837 int l;
13eb76e0
FB
3838 uint8_t *ptr;
3839 uint32_t val;
c227f099 3840 target_phys_addr_t page;
f3705d53 3841 MemoryRegionSection *section;
3b46e624 3842
13eb76e0
FB
3843 while (len > 0) {
3844 page = addr & TARGET_PAGE_MASK;
3845 l = (page + TARGET_PAGE_SIZE) - addr;
3846 if (l > len)
3847 l = len;
06ef3525 3848 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3849
13eb76e0 3850 if (is_write) {
f3705d53 3851 if (!memory_region_is_ram(section->mr)) {
f1f6e3b8 3852 target_phys_addr_t addr1;
f3705d53 3853 addr1 = section_addr(section, addr);
6a00d601
FB
3854 /* XXX: could force cpu_single_env to NULL to avoid
3855 potential bugs */
6c2934db 3856 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3857 /* 32 bit write access */
c27004ec 3858 val = ldl_p(buf);
37ec01d4 3859 io_mem_write(section->mr, addr1, val, 4);
13eb76e0 3860 l = 4;
6c2934db 3861 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3862 /* 16 bit write access */
c27004ec 3863 val = lduw_p(buf);
37ec01d4 3864 io_mem_write(section->mr, addr1, val, 2);
13eb76e0
FB
3865 l = 2;
3866 } else {
1c213d19 3867 /* 8 bit write access */
c27004ec 3868 val = ldub_p(buf);
37ec01d4 3869 io_mem_write(section->mr, addr1, val, 1);
13eb76e0
FB
3870 l = 1;
3871 }
f3705d53 3872 } else if (!section->readonly) {
8ca5692d 3873 ram_addr_t addr1;
f3705d53
AK
3874 addr1 = memory_region_get_ram_addr(section->mr)
3875 + section_addr(section, addr);
13eb76e0 3876 /* RAM case */
5579c7f3 3877 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3878 memcpy(ptr, buf, l);
3a7d929e
FB
3879 if (!cpu_physical_memory_is_dirty(addr1)) {
3880 /* invalidate code */
3881 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3882 /* set dirty bit */
f7c11b53
YT
3883 cpu_physical_memory_set_dirty_flags(
3884 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3885 }
050a0ddf 3886 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3887 }
3888 } else {
f3705d53 3889 if (!is_ram_rom_romd(section)) {
f1f6e3b8 3890 target_phys_addr_t addr1;
13eb76e0 3891 /* I/O case */
f3705d53 3892 addr1 = section_addr(section, addr);
6c2934db 3893 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3894 /* 32 bit read access */
37ec01d4 3895 val = io_mem_read(section->mr, addr1, 4);
c27004ec 3896 stl_p(buf, val);
13eb76e0 3897 l = 4;
6c2934db 3898 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3899 /* 16 bit read access */
37ec01d4 3900 val = io_mem_read(section->mr, addr1, 2);
c27004ec 3901 stw_p(buf, val);
13eb76e0
FB
3902 l = 2;
3903 } else {
1c213d19 3904 /* 8 bit read access */
37ec01d4 3905 val = io_mem_read(section->mr, addr1, 1);
c27004ec 3906 stb_p(buf, val);
13eb76e0
FB
3907 l = 1;
3908 }
3909 } else {
3910 /* RAM case */
f3705d53
AK
3911 ptr = qemu_get_ram_ptr(section->mr->ram_addr)
3912 + section_addr(section, addr);
3913 memcpy(buf, ptr, l);
050a0ddf 3914 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3915 }
3916 }
3917 len -= l;
3918 buf += l;
3919 addr += l;
3920 }
3921}
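
/* Illustrative sketch, not part of exec.c: cpu_physical_memory_rw() and the
 * cpu_physical_memory_read()/cpu_physical_memory_write() wrappers built on
 * it are the simplest way to copy data to and from guest-physical memory.
 * The address argument below is hypothetical. */
static void example_guest_copy(target_phys_addr_t guest_paddr)
{
    uint8_t buf[16] = { 0 };

    /* write 16 bytes at guest_paddr (RAM or MMIO), then read them back */
    cpu_physical_memory_write(guest_paddr, buf, sizeof(buf));
    cpu_physical_memory_read(guest_paddr, buf, sizeof(buf));
}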
8df1cd07 3922
d0ecd2aa 3923/* used for ROM loading : can write in RAM and ROM */
c227f099 3924void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3925 const uint8_t *buf, int len)
3926{
3927 int l;
3928 uint8_t *ptr;
c227f099 3929 target_phys_addr_t page;
f3705d53 3930 MemoryRegionSection *section;
3b46e624 3931
d0ecd2aa
FB
3932 while (len > 0) {
3933 page = addr & TARGET_PAGE_MASK;
3934 l = (page + TARGET_PAGE_SIZE) - addr;
3935 if (l > len)
3936 l = len;
06ef3525 3937 section = phys_page_find(page >> TARGET_PAGE_BITS);
3b46e624 3938
f3705d53 3939 if (!is_ram_rom_romd(section)) {
d0ecd2aa
FB
3940 /* do nothing */
3941 } else {
3942 unsigned long addr1;
f3705d53
AK
3943 addr1 = memory_region_get_ram_addr(section->mr)
3944 + section_addr(section, addr);
d0ecd2aa 3945 /* ROM/RAM case */
5579c7f3 3946 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3947 memcpy(ptr, buf, l);
050a0ddf 3948 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3949 }
3950 len -= l;
3951 buf += l;
3952 addr += l;
3953 }
3954}
3955
6d16c2f8
AL
3956typedef struct {
3957 void *buffer;
c227f099
AL
3958 target_phys_addr_t addr;
3959 target_phys_addr_t len;
6d16c2f8
AL
3960} BounceBuffer;
3961
3962static BounceBuffer bounce;
3963
ba223c29
AL
3964typedef struct MapClient {
3965 void *opaque;
3966 void (*callback)(void *opaque);
72cf2d4f 3967 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3968} MapClient;
3969
72cf2d4f
BS
3970static QLIST_HEAD(map_client_list, MapClient) map_client_list
3971 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3972
3973void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3974{
7267c094 3975 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3976
3977 client->opaque = opaque;
3978 client->callback = callback;
72cf2d4f 3979 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3980 return client;
3981}
3982
3983void cpu_unregister_map_client(void *_client)
3984{
3985 MapClient *client = (MapClient *)_client;
3986
72cf2d4f 3987 QLIST_REMOVE(client, link);
7267c094 3988 g_free(client);
ba223c29
AL
3989}
3990
3991static void cpu_notify_map_clients(void)
3992{
3993 MapClient *client;
3994
72cf2d4f
BS
3995 while (!QLIST_EMPTY(&map_client_list)) {
3996 client = QLIST_FIRST(&map_client_list);
ba223c29 3997 client->callback(client->opaque);
34d5e948 3998 cpu_unregister_map_client(client);
ba223c29
AL
3999 }
4000}
4001
6d16c2f8
AL
4002/* Map a physical memory region into a host virtual address.
4003 * May map a subset of the requested range, given by and returned in *plen.
4004 * May return NULL if resources needed to perform the mapping are exhausted.
4005 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4006 * Use cpu_register_map_client() to know when retrying the map operation is
4007 * likely to succeed.
6d16c2f8 4008 */
c227f099
AL
4009void *cpu_physical_memory_map(target_phys_addr_t addr,
4010 target_phys_addr_t *plen,
6d16c2f8
AL
4011 int is_write)
4012{
c227f099 4013 target_phys_addr_t len = *plen;
38bee5dc 4014 target_phys_addr_t todo = 0;
6d16c2f8 4015 int l;
c227f099 4016 target_phys_addr_t page;
f3705d53 4017 MemoryRegionSection *section;
f15fbc4b 4018 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4019 ram_addr_t rlen;
4020 void *ret;
6d16c2f8
AL
4021
4022 while (len > 0) {
4023 page = addr & TARGET_PAGE_MASK;
4024 l = (page + TARGET_PAGE_SIZE) - addr;
4025 if (l > len)
4026 l = len;
06ef3525 4027 section = phys_page_find(page >> TARGET_PAGE_BITS);
6d16c2f8 4028
f3705d53 4029 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
38bee5dc 4030 if (todo || bounce.buffer) {
6d16c2f8
AL
4031 break;
4032 }
4033 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4034 bounce.addr = addr;
4035 bounce.len = l;
4036 if (!is_write) {
54f7b4a3 4037 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4038 }
38bee5dc
SS
4039
4040 *plen = l;
4041 return bounce.buffer;
6d16c2f8 4042 }
8ab934f9 4043 if (!todo) {
f3705d53
AK
4044 raddr = memory_region_get_ram_addr(section->mr)
4045 + section_addr(section, addr);
8ab934f9 4046 }
6d16c2f8
AL
4047
4048 len -= l;
4049 addr += l;
38bee5dc 4050 todo += l;
6d16c2f8 4051 }
8ab934f9
SS
4052 rlen = todo;
4053 ret = qemu_ram_ptr_length(raddr, &rlen);
4054 *plen = rlen;
4055 return ret;
6d16c2f8
AL
4056}
4057
4058/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4059 * Will also mark the memory as dirty if is_write == 1. access_len gives
4060 * the amount of memory that was actually read or written by the caller.
4061 */
c227f099
AL
4062void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4063 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4064{
4065 if (buffer != bounce.buffer) {
4066 if (is_write) {
e890261f 4067 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4068 while (access_len) {
4069 unsigned l;
4070 l = TARGET_PAGE_SIZE;
4071 if (l > access_len)
4072 l = access_len;
4073 if (!cpu_physical_memory_is_dirty(addr1)) {
4074 /* invalidate code */
4075 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4076 /* set dirty bit */
f7c11b53
YT
4077 cpu_physical_memory_set_dirty_flags(
4078 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4079 }
4080 addr1 += l;
4081 access_len -= l;
4082 }
4083 }
868bb33f 4084 if (xen_enabled()) {
e41d7c69 4085 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4086 }
6d16c2f8
AL
4087 return;
4088 }
4089 if (is_write) {
4090 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4091 }
f8a83245 4092 qemu_vfree(bounce.buffer);
6d16c2f8 4093 bounce.buffer = NULL;
ba223c29 4094 cpu_notify_map_clients();
6d16c2f8 4095}
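
/* Illustrative sketch, not part of exec.c: the usual map/use/unmap pattern
 * for zero-copy access to guest memory.  When the map comes back NULL
 * (bounce buffer busy) a real caller would fall back to
 * cpu_physical_memory_rw() or queue a retry with cpu_register_map_client().
 * The names below are made up. */
static void example_dma_read(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 0 /* read-only */);

    if (!host) {
        return; /* resources exhausted; retry later */
    }
    /* ... consume up to plen bytes starting at 'host' ... */
    cpu_physical_memory_unmap(host, plen, 0, plen);
}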
d0ecd2aa 4096
8df1cd07 4097/* warning: addr must be aligned */
1e78bcc1
AG
4098static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4099 enum device_endian endian)
8df1cd07 4100{
8df1cd07
FB
4101 uint8_t *ptr;
4102 uint32_t val;
f3705d53 4103 MemoryRegionSection *section;
8df1cd07 4104
06ef3525 4105 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4106
f3705d53 4107 if (!is_ram_rom_romd(section)) {
8df1cd07 4108 /* I/O case */
f3705d53 4109 addr = section_addr(section, addr);
37ec01d4 4110 val = io_mem_read(section->mr, addr, 4);
1e78bcc1
AG
4111#if defined(TARGET_WORDS_BIGENDIAN)
4112 if (endian == DEVICE_LITTLE_ENDIAN) {
4113 val = bswap32(val);
4114 }
4115#else
4116 if (endian == DEVICE_BIG_ENDIAN) {
4117 val = bswap32(val);
4118 }
4119#endif
8df1cd07
FB
4120 } else {
4121 /* RAM case */
f3705d53 4122 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4123 & TARGET_PAGE_MASK)
f3705d53 4124 + section_addr(section, addr));
1e78bcc1
AG
4125 switch (endian) {
4126 case DEVICE_LITTLE_ENDIAN:
4127 val = ldl_le_p(ptr);
4128 break;
4129 case DEVICE_BIG_ENDIAN:
4130 val = ldl_be_p(ptr);
4131 break;
4132 default:
4133 val = ldl_p(ptr);
4134 break;
4135 }
8df1cd07
FB
4136 }
4137 return val;
4138}
4139
1e78bcc1
AG
4140uint32_t ldl_phys(target_phys_addr_t addr)
4141{
4142 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4143}
4144
4145uint32_t ldl_le_phys(target_phys_addr_t addr)
4146{
4147 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4148}
4149
4150uint32_t ldl_be_phys(target_phys_addr_t addr)
4151{
4152 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4153}
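
/* Illustrative sketch, not part of exec.c: code that needs a fixed byte
 * order regardless of the target's endianness can use the _le/_be variants
 * instead of the native-endian ldl_phys()/stl_phys() (stl_le_phys() is
 * defined further down in this file).  The address is hypothetical. */
static uint32_t example_bump_le_register(target_phys_addr_t reg_paddr)
{
    uint32_t v = ldl_le_phys(reg_paddr);   /* read a little-endian dword */

    stl_le_phys(reg_paddr, v + 1);         /* write it back, incremented */
    return v;
}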
4154
84b7b8e7 4155/* warning: addr must be aligned */
1e78bcc1
AG
4156static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4157 enum device_endian endian)
84b7b8e7 4158{
84b7b8e7
FB
4159 uint8_t *ptr;
4160 uint64_t val;
f3705d53 4161 MemoryRegionSection *section;
84b7b8e7 4162
06ef3525 4163 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4164
f3705d53 4165 if (!is_ram_rom_romd(section)) {
84b7b8e7 4166 /* I/O case */
f3705d53 4167 addr = section_addr(section, addr);
1e78bcc1
AG
4168
4169 /* XXX This is broken when device endian != cpu endian.
4170 Fix and add "endian" variable check */
84b7b8e7 4171#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4172 val = io_mem_read(section->mr, addr, 4) << 32;
4173 val |= io_mem_read(section->mr, addr + 4, 4);
84b7b8e7 4174#else
37ec01d4
AK
4175 val = io_mem_read(section->mr, addr, 4);
4176 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
84b7b8e7
FB
4177#endif
4178 } else {
4179 /* RAM case */
f3705d53 4180 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4181 & TARGET_PAGE_MASK)
f3705d53 4182 + section_addr(section, addr));
1e78bcc1
AG
4183 switch (endian) {
4184 case DEVICE_LITTLE_ENDIAN:
4185 val = ldq_le_p(ptr);
4186 break;
4187 case DEVICE_BIG_ENDIAN:
4188 val = ldq_be_p(ptr);
4189 break;
4190 default:
4191 val = ldq_p(ptr);
4192 break;
4193 }
84b7b8e7
FB
4194 }
4195 return val;
4196}
4197
1e78bcc1
AG
4198uint64_t ldq_phys(target_phys_addr_t addr)
4199{
4200 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4201}
4202
4203uint64_t ldq_le_phys(target_phys_addr_t addr)
4204{
4205 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4206}
4207
4208uint64_t ldq_be_phys(target_phys_addr_t addr)
4209{
4210 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4211}
4212
aab33094 4213/* XXX: optimize */
c227f099 4214uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4215{
4216 uint8_t val;
4217 cpu_physical_memory_read(addr, &val, 1);
4218 return val;
4219}
4220
733f0b02 4221/* warning: addr must be aligned */
1e78bcc1
AG
4222static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4223 enum device_endian endian)
aab33094 4224{
733f0b02
MT
4225 uint8_t *ptr;
4226 uint64_t val;
f3705d53 4227 MemoryRegionSection *section;
733f0b02 4228
06ef3525 4229 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4230
f3705d53 4231 if (!is_ram_rom_romd(section)) {
733f0b02 4232 /* I/O case */
f3705d53 4233 addr = section_addr(section, addr);
37ec01d4 4234 val = io_mem_read(section->mr, addr, 2);
1e78bcc1
AG
4235#if defined(TARGET_WORDS_BIGENDIAN)
4236 if (endian == DEVICE_LITTLE_ENDIAN) {
4237 val = bswap16(val);
4238 }
4239#else
4240 if (endian == DEVICE_BIG_ENDIAN) {
4241 val = bswap16(val);
4242 }
4243#endif
733f0b02
MT
4244 } else {
4245 /* RAM case */
f3705d53 4246 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4247 & TARGET_PAGE_MASK)
f3705d53 4248 + section_addr(section, addr));
1e78bcc1
AG
4249 switch (endian) {
4250 case DEVICE_LITTLE_ENDIAN:
4251 val = lduw_le_p(ptr);
4252 break;
4253 case DEVICE_BIG_ENDIAN:
4254 val = lduw_be_p(ptr);
4255 break;
4256 default:
4257 val = lduw_p(ptr);
4258 break;
4259 }
733f0b02
MT
4260 }
4261 return val;
aab33094
FB
4262}
4263
1e78bcc1
AG
4264uint32_t lduw_phys(target_phys_addr_t addr)
4265{
4266 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4267}
4268
4269uint32_t lduw_le_phys(target_phys_addr_t addr)
4270{
4271 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4272}
4273
4274uint32_t lduw_be_phys(target_phys_addr_t addr)
4275{
4276 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4277}
4278
8df1cd07
FB
4279/* warning: addr must be aligned. The ram page is not masked as dirty
4280 and the code inside is not invalidated. It is useful if the dirty
4281 bits are used to track modified PTEs */
c227f099 4282void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 4283{
8df1cd07 4284 uint8_t *ptr;
f3705d53 4285 MemoryRegionSection *section;
8df1cd07 4286
06ef3525 4287 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4288
f3705d53 4289 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4290 addr = section_addr(section, addr);
f3705d53 4291 if (memory_region_is_ram(section->mr)) {
37ec01d4 4292 section = &phys_sections[phys_section_rom];
06ef3525 4293 }
37ec01d4 4294 io_mem_write(section->mr, addr, val, 4);
8df1cd07 4295 } else {
f3705d53 4296 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
06ef3525 4297 & TARGET_PAGE_MASK)
f3705d53 4298 + section_addr(section, addr);
5579c7f3 4299 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4300 stl_p(ptr, val);
74576198
AL
4301
4302 if (unlikely(in_migration)) {
4303 if (!cpu_physical_memory_is_dirty(addr1)) {
4304 /* invalidate code */
4305 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4306 /* set dirty bit */
f7c11b53
YT
4307 cpu_physical_memory_set_dirty_flags(
4308 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4309 }
4310 }
8df1cd07
FB
4311 }
4312}
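
/* Illustrative sketch, not part of exec.c: the notdirty stores suit code
 * such as a target's page-table walker, which rewrites guest PTEs without
 * wanting to invalidate translated code on that page.  The PTE layout and
 * flag value are made up for the example. */
#define EXAMPLE_PTE_DIRTY 0x40

static void example_mark_pte_dirty(target_phys_addr_t pte_paddr)
{
    uint32_t pte = ldl_phys(pte_paddr);

    if (!(pte & EXAMPLE_PTE_DIRTY)) {
        /* store without touching the page's code-dirty tracking */
        stl_phys_notdirty(pte_paddr, pte | EXAMPLE_PTE_DIRTY);
    }
}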
4313
c227f099 4314void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 4315{
bc98a7ef 4316 uint8_t *ptr;
f3705d53 4317 MemoryRegionSection *section;
bc98a7ef 4318
06ef3525 4319 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4320
f3705d53 4321 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4322 addr = section_addr(section, addr);
f3705d53 4323 if (memory_region_is_ram(section->mr)) {
37ec01d4 4324 section = &phys_sections[phys_section_rom];
06ef3525 4325 }
bc98a7ef 4326#ifdef TARGET_WORDS_BIGENDIAN
37ec01d4
AK
4327 io_mem_write(section->mr, addr, val >> 32, 4);
4328 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
bc98a7ef 4329#else
37ec01d4
AK
4330 io_mem_write(section->mr, addr, (uint32_t)val, 4);
4331 io_mem_write(section->mr, addr + 4, val >> 32, 4);
bc98a7ef
JM
4332#endif
4333 } else {
f3705d53 4334 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
06ef3525 4335 & TARGET_PAGE_MASK)
f3705d53 4336 + section_addr(section, addr));
bc98a7ef
JM
4337 stq_p(ptr, val);
4338 }
4339}
4340
8df1cd07 4341/* warning: addr must be aligned */
1e78bcc1
AG
4342static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4343 enum device_endian endian)
8df1cd07 4344{
8df1cd07 4345 uint8_t *ptr;
f3705d53 4346 MemoryRegionSection *section;
8df1cd07 4347
06ef3525 4348 section = phys_page_find(addr >> TARGET_PAGE_BITS);
3b46e624 4349
f3705d53 4350 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4351 addr = section_addr(section, addr);
f3705d53 4352 if (memory_region_is_ram(section->mr)) {
37ec01d4 4353 section = &phys_sections[phys_section_rom];
06ef3525 4354 }
1e78bcc1
AG
4355#if defined(TARGET_WORDS_BIGENDIAN)
4356 if (endian == DEVICE_LITTLE_ENDIAN) {
4357 val = bswap32(val);
4358 }
4359#else
4360 if (endian == DEVICE_BIG_ENDIAN) {
4361 val = bswap32(val);
4362 }
4363#endif
37ec01d4 4364 io_mem_write(section->mr, addr, val, 4);
8df1cd07
FB
4365 } else {
4366 unsigned long addr1;
f3705d53
AK
4367 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4368 + section_addr(section, addr);
8df1cd07 4369 /* RAM case */
5579c7f3 4370 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4371 switch (endian) {
4372 case DEVICE_LITTLE_ENDIAN:
4373 stl_le_p(ptr, val);
4374 break;
4375 case DEVICE_BIG_ENDIAN:
4376 stl_be_p(ptr, val);
4377 break;
4378 default:
4379 stl_p(ptr, val);
4380 break;
4381 }
3a7d929e
FB
4382 if (!cpu_physical_memory_is_dirty(addr1)) {
4383 /* invalidate code */
4384 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4385 /* set dirty bit */
f7c11b53
YT
4386 cpu_physical_memory_set_dirty_flags(addr1,
4387 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4388 }
8df1cd07
FB
4389 }
4390}
4391
1e78bcc1
AG
4392void stl_phys(target_phys_addr_t addr, uint32_t val)
4393{
4394 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4395}
4396
4397void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4398{
4399 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4400}
4401
4402void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4403{
4404 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4405}
4406
aab33094 4407/* XXX: optimize */
c227f099 4408void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4409{
4410 uint8_t v = val;
4411 cpu_physical_memory_write(addr, &v, 1);
4412}
4413
733f0b02 4414/* warning: addr must be aligned */
1e78bcc1
AG
4415static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4416 enum device_endian endian)
aab33094 4417{
733f0b02 4418 uint8_t *ptr;
f3705d53 4419 MemoryRegionSection *section;
733f0b02 4420
06ef3525 4421 section = phys_page_find(addr >> TARGET_PAGE_BITS);
733f0b02 4422
f3705d53 4423 if (!memory_region_is_ram(section->mr) || section->readonly) {
37ec01d4 4424 addr = section_addr(section, addr);
f3705d53 4425 if (memory_region_is_ram(section->mr)) {
37ec01d4 4426 section = &phys_sections[phys_section_rom];
06ef3525 4427 }
1e78bcc1
AG
4428#if defined(TARGET_WORDS_BIGENDIAN)
4429 if (endian == DEVICE_LITTLE_ENDIAN) {
4430 val = bswap16(val);
4431 }
4432#else
4433 if (endian == DEVICE_BIG_ENDIAN) {
4434 val = bswap16(val);
4435 }
4436#endif
37ec01d4 4437 io_mem_write(section->mr, addr, val, 2);
733f0b02
MT
4438 } else {
4439 unsigned long addr1;
f3705d53
AK
4440 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
4441 + section_addr(section, addr);
733f0b02
MT
4442 /* RAM case */
4443 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4444 switch (endian) {
4445 case DEVICE_LITTLE_ENDIAN:
4446 stw_le_p(ptr, val);
4447 break;
4448 case DEVICE_BIG_ENDIAN:
4449 stw_be_p(ptr, val);
4450 break;
4451 default:
4452 stw_p(ptr, val);
4453 break;
4454 }
733f0b02
MT
4455 if (!cpu_physical_memory_is_dirty(addr1)) {
4456 /* invalidate code */
4457 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4458 /* set dirty bit */
4459 cpu_physical_memory_set_dirty_flags(addr1,
4460 (0xff & ~CODE_DIRTY_FLAG));
4461 }
4462 }
aab33094
FB
4463}
4464
1e78bcc1
AG
4465void stw_phys(target_phys_addr_t addr, uint32_t val)
4466{
4467 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4468}
4469
4470void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4471{
4472 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4473}
4474
4475void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4476{
4477 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4478}
4479
aab33094 4480/* XXX: optimize */
c227f099 4481void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4482{
4483 val = tswap64(val);
71d2b725 4484 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4485}
4486
1e78bcc1
AG
4487void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4488{
4489 val = cpu_to_le64(val);
4490 cpu_physical_memory_write(addr, &val, 8);
4491}
4492
4493void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4494{
4495 val = cpu_to_be64(val);
4496 cpu_physical_memory_write(addr, &val, 8);
4497}
4498
5e2972fd 4499/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4500int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4501 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4502{
4503 int l;
c227f099 4504 target_phys_addr_t phys_addr;
9b3c35e0 4505 target_ulong page;
13eb76e0
FB
4506
4507 while (len > 0) {
4508 page = addr & TARGET_PAGE_MASK;
4509 phys_addr = cpu_get_phys_page_debug(env, page);
4510 /* if no physical page mapped, return an error */
4511 if (phys_addr == -1)
4512 return -1;
4513 l = (page + TARGET_PAGE_SIZE) - addr;
4514 if (l > len)
4515 l = len;
5e2972fd 4516 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4517 if (is_write)
4518 cpu_physical_memory_write_rom(phys_addr, buf, l);
4519 else
5e2972fd 4520 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4521 len -= l;
4522 buf += l;
4523 addr += l;
4524 }
4525 return 0;
4526}
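
/* Illustrative sketch, not part of exec.c: cpu_memory_rw_debug() takes a
 * guest-virtual address and is what debug interfaces such as the gdb stub
 * use to peek at guest memory.  The wrapper below is hypothetical. */
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *out, int len)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, len, 0 /* read */);
}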
a68fe89c 4527#endif
13eb76e0 4528
2e70f6ef
PB
4529/* in deterministic execution mode, instructions doing device I/Os
4530 must be at the end of the TB */
4531void cpu_io_recompile(CPUState *env, void *retaddr)
4532{
4533 TranslationBlock *tb;
4534 uint32_t n, cflags;
4535 target_ulong pc, cs_base;
4536 uint64_t flags;
4537
4538 tb = tb_find_pc((unsigned long)retaddr);
4539 if (!tb) {
4540 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4541 retaddr);
4542 }
4543 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4544 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4545 /* Calculate how many instructions had been executed before the fault
bf20dc07 4546 occurred. */
2e70f6ef
PB
4547 n = n - env->icount_decr.u16.low;
4548 /* Generate a new TB ending on the I/O insn. */
4549 n++;
4550 /* On MIPS and SH, delay slot instructions can only be restarted if
4551 they were already the first instruction in the TB. If this is not
bf20dc07 4552 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4553 branch. */
4554#if defined(TARGET_MIPS)
4555 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4556 env->active_tc.PC -= 4;
4557 env->icount_decr.u16.low++;
4558 env->hflags &= ~MIPS_HFLAG_BMASK;
4559 }
4560#elif defined(TARGET_SH4)
4561 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4562 && n > 1) {
4563 env->pc -= 2;
4564 env->icount_decr.u16.low++;
4565 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4566 }
4567#endif
4568 /* This should never happen. */
4569 if (n > CF_COUNT_MASK)
4570 cpu_abort(env, "TB too big during recompile");
4571
4572 cflags = n | CF_LAST_IO;
4573 pc = tb->pc;
4574 cs_base = tb->cs_base;
4575 flags = tb->flags;
4576 tb_phys_invalidate(tb, -1);
4577 /* FIXME: In theory this could raise an exception. In practice
4578 we have already translated the block once so it's probably ok. */
4579 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4580 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4581 the first in the TB) then we end up generating a whole new TB and
4582 repeating the fault, which is horribly inefficient.
4583 Better would be to execute just this insn uncached, or generate a
4584 second new TB. */
4585 cpu_resume_from_signal(env, NULL);
4586}
4587
b3755a91
PB
4588#if !defined(CONFIG_USER_ONLY)
4589
055403b2 4590void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4591{
4592 int i, target_code_size, max_target_code_size;
4593 int direct_jmp_count, direct_jmp2_count, cross_page;
4594 TranslationBlock *tb;
3b46e624 4595
e3db7226
FB
4596 target_code_size = 0;
4597 max_target_code_size = 0;
4598 cross_page = 0;
4599 direct_jmp_count = 0;
4600 direct_jmp2_count = 0;
4601 for(i = 0; i < nb_tbs; i++) {
4602 tb = &tbs[i];
4603 target_code_size += tb->size;
4604 if (tb->size > max_target_code_size)
4605 max_target_code_size = tb->size;
4606 if (tb->page_addr[1] != -1)
4607 cross_page++;
4608 if (tb->tb_next_offset[0] != 0xffff) {
4609 direct_jmp_count++;
4610 if (tb->tb_next_offset[1] != 0xffff) {
4611 direct_jmp2_count++;
4612 }
4613 }
4614 }
4615 /* XXX: avoid using doubles ? */
57fec1fe 4616 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4617 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4618 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4619 cpu_fprintf(f, "TB count %d/%d\n",
4620 nb_tbs, code_gen_max_blocks);
5fafdf24 4621 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4622 nb_tbs ? target_code_size / nb_tbs : 0,
4623 max_target_code_size);
055403b2 4624 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4625 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4626 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4627 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4628 cross_page,
e3db7226
FB
4629 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4630 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4631 direct_jmp_count,
e3db7226
FB
4632 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4633 direct_jmp2_count,
4634 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4635 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4636 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4637 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4638 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4639 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4640}
4641
d39e8222
AK
4642/* NOTE: this function can trigger an exception */
4643/* NOTE2: the returned address is not exactly the physical address: it
4644 is the offset relative to phys_ram_base */
4645tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4646{
4647 int mmu_idx, page_index, pd;
4648 void *p;
37ec01d4 4649 MemoryRegion *mr;
d39e8222
AK
4650
4651 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4652 mmu_idx = cpu_mmu_index(env1);
4653 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4654 (addr & TARGET_PAGE_MASK))) {
4655 ldub_code(addr);
4656 }
ce5d64c2 4657 pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
37ec01d4
AK
4658 mr = iotlb_to_region(pd);
4659 if (mr != &io_mem_ram && mr != &io_mem_rom
4660 && mr != &io_mem_notdirty && !mr->rom_device) {
d39e8222
AK
4661#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4662 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4663#else
4664 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4665#endif
4666 }
4667 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4668 return qemu_ram_addr_from_host_nofail(p);
4669}
4670
82afa586
BH
4671/*
4672 * A helper function for the _utterly broken_ virtio device model to find out if
4673 * it's running on a big endian machine. Don't do this at home kids!
4674 */
4675bool virtio_is_big_endian(void);
4676bool virtio_is_big_endian(void)
4677{
4678#if defined(TARGET_WORDS_BIGENDIAN)
4679 return true;
4680#else
4681 return false;
4682#endif
4683}
4684
61382a50 4685#define MMUSUFFIX _cmmu
3917149d 4686#undef GETPC
61382a50
FB
4687#define GETPC() NULL
4688#define env cpu_single_env
b769d8fe 4689#define SOFTMMU_CODE_ACCESS
61382a50
FB
4690
4691#define SHIFT 0
4692#include "softmmu_template.h"
4693
4694#define SHIFT 1
4695#include "softmmu_template.h"
4696
4697#define SHIFT 2
4698#include "softmmu_template.h"
4699
4700#define SHIFT 3
4701#include "softmmu_template.h"
4702
4703#undef env
4704
4705#endif