/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

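/* Walk the multi-level l1_map radix tree for 'index' (a target page
   number) and return the matching PageDesc.  With alloc != 0, missing
   intermediate tables and the final PageDesc array are allocated on
   the way down; otherwise NULL is returned as soon as a level is
   missing. */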
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
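/* Same walk as page_find_alloc(), but over l1_phys_map and keyed by
   physical page index.  Newly allocated leaf entries start out
   pointing at io_mem_unassigned, with region_offset set to the page's
   physical address. */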
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

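/* Allocate the buffer that TCG translates into.  Either the static
   buffer is used, or an executable mapping is created with mmap(); the
   per-host #ifdefs below mostly constrain placement and size so that
   generated code can reach the prologue and other blocks with direct
   branches. */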
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

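/* Register a new CPU: append it to the first_cpu list, give it the
   next free cpu_index and, for system emulation, hook up the common
   vmstate/savevm handlers. */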
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

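/* Pointers in a page's TB list carry the page index (0 or 1) of the
   next TB in their low two bits, so each step below masks the tag off
   before following the link. */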
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

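/* Unlink jump entry 'n' of 'tb' from the circular list of blocks that
   jump to the same destination TB; an entry tagged with 2 in its low
   bits is the destination's jmp_first list head. */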
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

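/* Remove a TB completely: from the physical hash table, from the
   per-page TB lists, from every CPU's tb_jmp_cache, and from the jump
   chains of any TB that branches into it. */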
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

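/* Set bits [start, start + len) in the bitmap 'tab', handling the
   partial bytes at both ends. */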
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

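/* Translate one block starting at 'pc': allocate a TB (flushing the
   whole translation cache if the buffer is full), generate host code
   at code_gen_ptr, then link the TB into the physical page tables of
   the one or two pages it covers. */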
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif defined(_WIN32)
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
        setvbuf(logfile, NULL, _IONBF, 0);
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

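/* Make the CPU leave the translated code it may currently be running
   by resetting the jump chains of its current TB. */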
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */
1698
b54ad049
FB
1699void cpu_reset_interrupt(CPUState *env, int mask)
1700{
1701 env->interrupt_request &= ~mask;
1702}
1703
3098dba0
AJ
1704void cpu_exit(CPUState *env)
1705{
1706 env->exit_request = 1;
1707 cpu_unlink_tb(env);
1708}
1709
c7cd6a37 1710const CPULogItem cpu_log_items[] = {
5fafdf24 1711 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1712 "show generated host assembly code for each compiled TB" },
1713 { CPU_LOG_TB_IN_ASM, "in_asm",
1714 "show target assembly code for each compiled TB" },
5fafdf24 1715 { CPU_LOG_TB_OP, "op",
57fec1fe 1716 "show micro ops for each compiled TB" },
f193c797 1717 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1718 "show micro ops "
1719#ifdef TARGET_I386
1720 "before eflags optimization and "
f193c797 1721#endif
e01a1157 1722 "after liveness analysis" },
f193c797
FB
1723 { CPU_LOG_INT, "int",
1724 "show interrupts/exceptions in short format" },
1725 { CPU_LOG_EXEC, "exec",
1726 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1727 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1728 "show CPU state before block translation" },
f193c797
FB
1729#ifdef TARGET_I386
1730 { CPU_LOG_PCALL, "pcall",
1731 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1732 { CPU_LOG_RESET, "cpu_reset",
1733 "show CPU state before CPU resets" },
f193c797 1734#endif
8e3a9fd2 1735#ifdef DEBUG_IOPORT
fd872598
FB
1736 { CPU_LOG_IOPORT, "ioport",
1737 "show all i/o ports accesses" },
8e3a9fd2 1738#endif
f193c797
FB
1739 { 0, NULL, NULL },
1740};
1741
1742static int cmp1(const char *s1, int n, const char *s2)
1743{
1744 if (strlen(s2) != n)
1745 return 0;
1746 return memcmp(s1, s2, n) == 0;
1747}
3b46e624 1748
f193c797
FB
1749/* takes a comma separated list of log masks. Return 0 if error. */
1750int cpu_str_to_log_mask(const char *str)
1751{
c7cd6a37 1752 const CPULogItem *item;
f193c797
FB
1753 int mask;
1754 const char *p, *p1;
1755
1756 p = str;
1757 mask = 0;
1758 for(;;) {
1759 p1 = strchr(p, ',');
1760 if (!p1)
1761 p1 = p + strlen(p);
9742bf26
YT
1762 if(cmp1(p,p1-p,"all")) {
1763 for(item = cpu_log_items; item->mask != 0; item++) {
1764 mask |= item->mask;
1765 }
1766 } else {
1767 for(item = cpu_log_items; item->mask != 0; item++) {
1768 if (cmp1(p, p1 - p, item->name))
1769 goto found;
1770 }
1771 return 0;
f193c797 1772 }
f193c797
FB
1773 found:
1774 mask |= item->mask;
1775 if (*p1 != ',')
1776 break;
1777 p = p1 + 1;
1778 }
1779 return mask;
1780}
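
/* Illustrative sketch, standalone and hypothetical: the same comma-separated
   parsing idea as cpu_str_to_log_mask() above, reduced to a three-entry demo
   table so the mask folding is easy to follow.  The mask values here are made
   up; only the parsing technique matches the function above. */
#include <stdio.h>
#include <string.h>

struct demo_log_item { int mask; const char *name; };

static const struct demo_log_item demo_items[] = {
    { 1 << 0, "in_asm" },
    { 1 << 1, "out_asm" },
    { 1 << 2, "exec" },
    { 0, NULL },
};

static int demo_str_to_mask(const char *str)
{
    int mask = 0;
    const char *p = str;

    for (;;) {
        const char *p1 = strchr(p, ',');
        size_t n = p1 ? (size_t)(p1 - p) : strlen(p);
        const struct demo_log_item *item;

        for (item = demo_items; item->mask != 0; item++) {
            if (strlen(item->name) == n && memcmp(p, item->name, n) == 0) {
                mask |= item->mask;
                break;
            }
        }
        if (item->mask == 0) {
            return 0;                     /* unknown name: report an error */
        }
        if (!p1) {
            break;
        }
        p = p1 + 1;
    }
    return mask;
}

int main(void)
{
    printf("mask=%#x\n", demo_str_to_mask("in_asm,exec"));   /* prints 0x5 */
    return 0;
}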
ea041c0e 1781
7501267e
FB
1782void cpu_abort(CPUState *env, const char *fmt, ...)
1783{
1784 va_list ap;
493ae1f0 1785 va_list ap2;
7501267e
FB
1786
1787 va_start(ap, fmt);
493ae1f0 1788 va_copy(ap2, ap);
7501267e
FB
1789 fprintf(stderr, "qemu: fatal: ");
1790 vfprintf(stderr, fmt, ap);
1791 fprintf(stderr, "\n");
1792#ifdef TARGET_I386
7fe48483
FB
1793 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1794#else
1795 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1796#endif
93fcfe39
AL
1797 if (qemu_log_enabled()) {
1798 qemu_log("qemu: fatal: ");
1799 qemu_log_vprintf(fmt, ap2);
1800 qemu_log("\n");
f9373291 1801#ifdef TARGET_I386
93fcfe39 1802 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1803#else
93fcfe39 1804 log_cpu_state(env, 0);
f9373291 1805#endif
31b1a7b4 1806 qemu_log_flush();
93fcfe39 1807 qemu_log_close();
924edcae 1808 }
493ae1f0 1809 va_end(ap2);
f9373291 1810 va_end(ap);
fd052bf6
RV
1811#if defined(CONFIG_USER_ONLY)
1812 {
1813 struct sigaction act;
1814 sigfillset(&act.sa_mask);
1815 act.sa_handler = SIG_DFL;
1816 sigaction(SIGABRT, &act, NULL);
1817 }
1818#endif
7501267e
FB
1819 abort();
1820}
1821
c5be9f08
TS
1822CPUState *cpu_copy(CPUState *env)
1823{
01ba9816 1824 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1825 CPUState *next_cpu = new_env->next_cpu;
1826 int cpu_index = new_env->cpu_index;
5a38f081
AL
1827#if defined(TARGET_HAS_ICE)
1828 CPUBreakpoint *bp;
1829 CPUWatchpoint *wp;
1830#endif
1831
c5be9f08 1832 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1833
1834 /* Preserve chaining and index. */
c5be9f08
TS
1835 new_env->next_cpu = next_cpu;
1836 new_env->cpu_index = cpu_index;
5a38f081
AL
1837
1838 /* Clone all break/watchpoints.
1839 Note: Once we support ptrace with hw-debug register access, make sure
1840 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1841 QTAILQ_INIT(&env->breakpoints);
1842 QTAILQ_INIT(&env->watchpoints);
5a38f081 1843#if defined(TARGET_HAS_ICE)
72cf2d4f 1844 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1845 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1846 }
72cf2d4f 1847 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1848 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1849 wp->flags, NULL);
1850 }
1851#endif
1852
c5be9f08
TS
1853 return new_env;
1854}
1855
0124311e
FB
1856#if !defined(CONFIG_USER_ONLY)
1857
5c751e99
EI
1858static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1859{
1860 unsigned int i;
1861
1862 /* Discard jump cache entries for any tb which might potentially
1863 overlap the flushed page. */
1864 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1865 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1866 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1867
1868 i = tb_jmp_cache_hash_page(addr);
1869 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1870 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1871}
1872
08738984
IK
1873static CPUTLBEntry s_cputlb_empty_entry = {
1874 .addr_read = -1,
1875 .addr_write = -1,
1876 .addr_code = -1,
1877 .addend = -1,
1878};
1879
ee8b7021
FB
1880/* NOTE: if flush_global is true, also flush global entries (not
1881 implemented yet) */
1882void tlb_flush(CPUState *env, int flush_global)
33417e70 1883{
33417e70 1884 int i;
0124311e 1885
9fa3e853
FB
1886#if defined(DEBUG_TLB)
1887 printf("tlb_flush:\n");
1888#endif
0124311e
FB
1889 /* must reset current TB so that interrupts cannot modify the
1890 links while we are modifying them */
1891 env->current_tb = NULL;
1892
33417e70 1893 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1894 int mmu_idx;
1895 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1896 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1897 }
33417e70 1898 }
9fa3e853 1899
8a40a180 1900 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1901
d4c430a8
PB
1902 env->tlb_flush_addr = -1;
1903 env->tlb_flush_mask = 0;
e3db7226 1904 tlb_flush_count++;
33417e70
FB
1905}
1906
274da6b2 1907static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1908{
5fafdf24 1909 if (addr == (tlb_entry->addr_read &
84b7b8e7 1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1911 addr == (tlb_entry->addr_write &
84b7b8e7 1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1913 addr == (tlb_entry->addr_code &
84b7b8e7 1914 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1915 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1916 }
61382a50
FB
1917}
1918
2e12669a 1919void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1920{
8a40a180 1921 int i;
cfde4bd9 1922 int mmu_idx;
0124311e 1923
9fa3e853 1924#if defined(DEBUG_TLB)
108c49b8 1925 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1926#endif
d4c430a8
PB
1927 /* Check if we need to flush due to large pages. */
1928 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1929#if defined(DEBUG_TLB)
1930 printf("tlb_flush_page: forced full flush ("
1931 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1932 env->tlb_flush_addr, env->tlb_flush_mask);
1933#endif
1934 tlb_flush(env, 1);
1935 return;
1936 }
0124311e
FB
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
61382a50
FB
1940
1941 addr &= TARGET_PAGE_MASK;
1942 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1944 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1945
5c751e99 1946 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1947}
1948
9fa3e853
FB
1949/* update the TLBs so that writes to code in the virtual page 'addr'
1950 can be detected */
c227f099 1951static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1952{
5fafdf24 1953 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1954 ram_addr + TARGET_PAGE_SIZE,
1955 CODE_DIRTY_FLAG);
9fa3e853
FB
1956}
1957
9fa3e853 1958/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1959 tested for self modifying code */
c227f099 1960static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1961 target_ulong vaddr)
9fa3e853 1962{
f7c11b53 1963 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1964}
1965
5fafdf24 1966static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1967 unsigned long start, unsigned long length)
1968{
1969 unsigned long addr;
0e0df1e2 1970 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
84b7b8e7 1971 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1972 if ((addr - start) < length) {
0f459d16 1973 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1974 }
1975 }
1976}
1977
5579c7f3 1978/* Note: start and end must be within the same ram block. */
c227f099 1979void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1980 int dirty_flags)
1ccde1cb
FB
1981{
1982 CPUState *env;
4f2ac237 1983 unsigned long length, start1;
f7c11b53 1984 int i;
1ccde1cb
FB
1985
1986 start &= TARGET_PAGE_MASK;
1987 end = TARGET_PAGE_ALIGN(end);
1988
1989 length = end - start;
1990 if (length == 0)
1991 return;
f7c11b53 1992 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1993
1ccde1cb
FB
1994 /* we modify the TLB cache so that the dirty bit will be set again
1995 when accessing the range */
b2e0a138 1996 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1997 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1998 address comparisons below. */
b2e0a138 1999 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2000 != (end - 1) - start) {
2001 abort();
2002 }
2003
6a00d601 2004 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2005 int mmu_idx;
2006 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2007 for(i = 0; i < CPU_TLB_SIZE; i++)
2008 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2009 start1, length);
2010 }
6a00d601 2011 }
1ccde1cb
FB
2012}
2013
74576198
AL
2014int cpu_physical_memory_set_dirty_tracking(int enable)
2015{
f6f3fbca 2016 int ret = 0;
74576198 2017 in_migration = enable;
f6f3fbca 2018 return ret;
74576198
AL
2019}
2020
3a7d929e
FB
2021static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2022{
c227f099 2023 ram_addr_t ram_addr;
5579c7f3 2024 void *p;
3a7d929e 2025
0e0df1e2 2026 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
5579c7f3
PB
2027 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2028 + tlb_entry->addend);
e890261f 2029 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2030 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2031 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2032 }
2033 }
2034}
2035
2036/* update the TLB according to the current state of the dirty bits */
2037void cpu_tlb_update_dirty(CPUState *env)
2038{
2039 int i;
cfde4bd9
IY
2040 int mmu_idx;
2041 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2042 for(i = 0; i < CPU_TLB_SIZE; i++)
2043 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2044 }
3a7d929e
FB
2045}
2046
0f459d16 2047static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2048{
0f459d16
PB
2049 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2050 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2051}
2052
0f459d16
PB
2053/* update the TLB corresponding to virtual page vaddr
2054 so that it is no longer dirty */
2055static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2056{
1ccde1cb 2057 int i;
cfde4bd9 2058 int mmu_idx;
1ccde1cb 2059
0f459d16 2060 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2061 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2062 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2063 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2064}
2065
d4c430a8
PB
2066/* Our TLB does not support large pages, so remember the area covered by
2067 large pages and trigger a full TLB flush if these are invalidated. */
2068static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2069 target_ulong size)
2070{
2071 target_ulong mask = ~(size - 1);
2072
2073 if (env->tlb_flush_addr == (target_ulong)-1) {
2074 env->tlb_flush_addr = vaddr & mask;
2075 env->tlb_flush_mask = mask;
2076 return;
2077 }
2078 /* Extend the existing region to include the new page.
2079 This is a compromise between unnecessary flushes and the cost
2080 of maintaining a full variable size TLB. */
2081 mask &= env->tlb_flush_mask;
2082 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2083 mask <<= 1;
2084 }
2085 env->tlb_flush_addr &= mask;
2086 env->tlb_flush_mask = mask;
2087}
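
/* Illustrative sketch, standalone with made-up addresses: the mask-widening
   step used by tlb_add_large_page() above.  Starting from the new page's own
   mask, the mask is shifted left until the previously recorded address and
   the new one agree in every bit the mask still covers, i.e. until both large
   pages fall inside one aligned region that a single flush check can test. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t flush_addr = 0x40200000;            /* previously recorded 2MB page */
    uint64_t flush_mask = ~(uint64_t)(0x200000 - 1);
    uint64_t vaddr      = 0x40a00000;            /* new 2MB page being added */
    uint64_t mask       = ~(uint64_t)(0x200000 - 1);

    mask &= flush_mask;
    while (((flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    flush_addr &= mask;

    /* Prints a 16MB-aligned region covering both pages. */
    printf("region base=%#llx mask=%#llx\n",
           (unsigned long long)flush_addr, (unsigned long long)mask);
    return 0;
}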
2088
1d393fa2
AK
2089static bool is_ram_rom(ram_addr_t pd)
2090{
2091 pd &= ~TARGET_PAGE_MASK;
0e0df1e2 2092 return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
1d393fa2
AK
2093}
2094
2095static bool is_ram_rom_romd(ram_addr_t pd)
2096{
2097 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2098}
2099
d4c430a8
PB
2100/* Add a new TLB entry. At most one entry for a given virtual address
2101 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2102 supplied size is only used by tlb_flush_page. */
2103void tlb_set_page(CPUState *env, target_ulong vaddr,
2104 target_phys_addr_t paddr, int prot,
2105 int mmu_idx, target_ulong size)
9fa3e853 2106{
f1f6e3b8 2107 PhysPageDesc p;
4f2ac237 2108 unsigned long pd;
9fa3e853 2109 unsigned int index;
4f2ac237 2110 target_ulong address;
0f459d16 2111 target_ulong code_address;
355b1943 2112 unsigned long addend;
84b7b8e7 2113 CPUTLBEntry *te;
a1d1bb31 2114 CPUWatchpoint *wp;
c227f099 2115 target_phys_addr_t iotlb;
9fa3e853 2116
d4c430a8
PB
2117 assert(size >= TARGET_PAGE_SIZE);
2118 if (size != TARGET_PAGE_SIZE) {
2119 tlb_add_large_page(env, vaddr, size);
2120 }
92e873b9 2121 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2122 pd = p.phys_offset;
9fa3e853 2123#if defined(DEBUG_TLB)
7fd3f494
SW
2124 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2125 " prot=%x idx=%d pd=0x%08lx\n",
2126 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2127#endif
2128
0f459d16 2129 address = vaddr;
1d393fa2 2130 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2131 /* IO memory case (romd handled later) */
2132 address |= TLB_MMIO;
2133 }
5579c7f3 2134 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2135 if (is_ram_rom(pd)) {
0f459d16
PB
2136 /* Normal RAM. */
2137 iotlb = pd & TARGET_PAGE_MASK;
0e0df1e2
AK
2138 if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2139 iotlb |= io_mem_notdirty.ram_addr;
0f459d16 2140 else
0e0df1e2 2141 iotlb |= io_mem_rom.ram_addr;
0f459d16 2142 } else {
ccbb4d44 2143 /* IO handlers are currently passed a physical address.
0f459d16
PB
2144 It would be nice to pass an offset from the base address
2145 of that region. This would avoid having to special case RAM,
2146 and avoid full address decoding in every device.
2147 We can't use the high bits of pd for this because
2148 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2149 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2150 iotlb += p.region_offset;
0f459d16
PB
2151 }
2152
2153 code_address = address;
2154 /* Make accesses to pages with watchpoints go via the
2155 watchpoint trap routines. */
72cf2d4f 2156 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2157 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2158 /* Avoid trapping reads of pages with a write breakpoint. */
2159 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2160 iotlb = io_mem_watch + paddr;
2161 address |= TLB_MMIO;
2162 break;
2163 }
6658ffb8 2164 }
0f459d16 2165 }
d79acba4 2166
0f459d16
PB
2167 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2168 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2169 te = &env->tlb_table[mmu_idx][index];
2170 te->addend = addend - vaddr;
2171 if (prot & PAGE_READ) {
2172 te->addr_read = address;
2173 } else {
2174 te->addr_read = -1;
2175 }
5c751e99 2176
0f459d16
PB
2177 if (prot & PAGE_EXEC) {
2178 te->addr_code = code_address;
2179 } else {
2180 te->addr_code = -1;
2181 }
2182 if (prot & PAGE_WRITE) {
0e0df1e2 2183 if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr ||
0f459d16
PB
2184 (pd & IO_MEM_ROMD)) {
2185 /* Write access calls the I/O callback. */
2186 te->addr_write = address | TLB_MMIO;
0e0df1e2 2187 } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
0f459d16
PB
2188 !cpu_physical_memory_is_dirty(pd)) {
2189 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2190 } else {
0f459d16 2191 te->addr_write = address;
9fa3e853 2192 }
0f459d16
PB
2193 } else {
2194 te->addr_write = -1;
9fa3e853 2195 }
9fa3e853
FB
2196}
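
/* Illustrative sketch, standalone with simplified types and sizes: the
   direct-mapped lookup that consumes the entries tlb_set_page() fills in.
   The virtual page number picks a slot, the stored comparator must match the
   page-aligned address (in the real code the low bits carry flags such as
   TLB_NOTDIRTY or TLB_MMIO), and 'addend' turns a guest virtual address into
   a host pointer.  Entries must start out as -1, as s_cputlb_empty_entry does
   above, so that nothing matches before the first refill. */
#include <stdint.h>
#include <stddef.h>

#define DEMO_PAGE_BITS 12
#define DEMO_PAGE_MASK (~(uintptr_t)((1u << DEMO_PAGE_BITS) - 1))
#define DEMO_TLB_SIZE  256

typedef struct {
    uintptr_t addr_read;   /* page-aligned guest vaddr, or -1 if unreadable */
    uintptr_t addend;      /* host base minus guest vaddr for this page */
} demo_tlb_entry;

static demo_tlb_entry demo_tlb[DEMO_TLB_SIZE];

void *demo_lookup_read(uintptr_t vaddr)
{
    unsigned index = (vaddr >> DEMO_PAGE_BITS) & (DEMO_TLB_SIZE - 1);
    demo_tlb_entry *te = &demo_tlb[index];

    if ((vaddr & DEMO_PAGE_MASK) == (te->addr_read & DEMO_PAGE_MASK)) {
        return (void *)(vaddr + te->addend);   /* fast hit */
    }
    return NULL;   /* miss: the real code refills via tlb_set_page() */
}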
2197
0124311e
FB
2198#else
2199
ee8b7021 2200void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2201{
2202}
2203
2e12669a 2204void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2205{
2206}
2207
edf8e2af
MW
2208/*
2209 * Walks guest process memory "regions" one by one
2210 * and calls callback function 'fn' for each region.
2211 */
5cd2c5b6
RH
2212
2213struct walk_memory_regions_data
2214{
2215 walk_memory_regions_fn fn;
2216 void *priv;
2217 unsigned long start;
2218 int prot;
2219};
2220
2221static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2222 abi_ulong end, int new_prot)
5cd2c5b6
RH
2223{
2224 if (data->start != -1ul) {
2225 int rc = data->fn(data->priv, data->start, end, data->prot);
2226 if (rc != 0) {
2227 return rc;
2228 }
2229 }
2230
2231 data->start = (new_prot ? end : -1ul);
2232 data->prot = new_prot;
2233
2234 return 0;
2235}
2236
2237static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2238 abi_ulong base, int level, void **lp)
5cd2c5b6 2239{
b480d9b7 2240 abi_ulong pa;
5cd2c5b6
RH
2241 int i, rc;
2242
2243 if (*lp == NULL) {
2244 return walk_memory_regions_end(data, base, 0);
2245 }
2246
2247 if (level == 0) {
2248 PageDesc *pd = *lp;
7296abac 2249 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2250 int prot = pd[i].flags;
2251
2252 pa = base | (i << TARGET_PAGE_BITS);
2253 if (prot != data->prot) {
2254 rc = walk_memory_regions_end(data, pa, prot);
2255 if (rc != 0) {
2256 return rc;
9fa3e853 2257 }
9fa3e853 2258 }
5cd2c5b6
RH
2259 }
2260 } else {
2261 void **pp = *lp;
7296abac 2262 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2263 pa = base | ((abi_ulong)i <<
2264 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2265 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2266 if (rc != 0) {
2267 return rc;
2268 }
2269 }
2270 }
2271
2272 return 0;
2273}
2274
2275int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2276{
2277 struct walk_memory_regions_data data;
2278 unsigned long i;
2279
2280 data.fn = fn;
2281 data.priv = priv;
2282 data.start = -1ul;
2283 data.prot = 0;
2284
2285 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2286 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2287 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2288 if (rc != 0) {
2289 return rc;
9fa3e853 2290 }
33417e70 2291 }
5cd2c5b6
RH
2292
2293 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2294}
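
/* Illustrative sketch, standalone with a flat array instead of the
   multi-level l1_map: the coalescing idea behind walk_memory_regions().
   Pages are scanned in address order and one callback is emitted per maximal
   run of identical protection bits. */
#include <stdio.h>

typedef int (*demo_region_fn)(void *priv, unsigned long start,
                              unsigned long end, int prot);

static void demo_walk(const int *page_prot, unsigned long npages,
                      unsigned long page_size, demo_region_fn fn, void *priv)
{
    unsigned long start = -1ul;
    int prot = 0;
    unsigned long i;

    for (i = 0; i <= npages; i++) {
        int p = (i < npages) ? page_prot[i] : 0;
        if (p != prot) {
            if (start != -1ul) {
                fn(priv, start, i * page_size, prot);
            }
            start = p ? i * page_size : -1ul;
            prot = p;
        }
    }
}

static int demo_print(void *priv, unsigned long start,
                      unsigned long end, int prot)
{
    printf("%08lx-%08lx prot=%d\n", start, end, prot);
    return 0;
}

int main(void)
{
    int prot[] = { 0, 3, 3, 3, 0, 5, 5, 0 };
    demo_walk(prot, 8, 0x1000, demo_print, NULL);   /* prints two regions */
    return 0;
}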
2295
b480d9b7
PB
2296static int dump_region(void *priv, abi_ulong start,
2297 abi_ulong end, unsigned long prot)
edf8e2af
MW
2298{
2299 FILE *f = (FILE *)priv;
2300
b480d9b7
PB
2301 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2302 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2303 start, end, end - start,
2304 ((prot & PAGE_READ) ? 'r' : '-'),
2305 ((prot & PAGE_WRITE) ? 'w' : '-'),
2306 ((prot & PAGE_EXEC) ? 'x' : '-'));
2307
2308 return (0);
2309}
2310
2311/* dump memory mappings */
2312void page_dump(FILE *f)
2313{
2314 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2315 "start", "end", "size", "prot");
2316 walk_memory_regions(f, dump_region);
33417e70
FB
2317}
2318
53a5960a 2319int page_get_flags(target_ulong address)
33417e70 2320{
9fa3e853
FB
2321 PageDesc *p;
2322
2323 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2324 if (!p)
9fa3e853
FB
2325 return 0;
2326 return p->flags;
2327}
2328
376a7909
RH
2329/* Modify the flags of a page and invalidate the code if necessary.
2330 The flag PAGE_WRITE_ORG is positioned automatically depending
2331 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2332void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2333{
376a7909
RH
2334 target_ulong addr, len;
2335
2336 /* This function should never be called with addresses outside the
2337 guest address space. If this assert fires, it probably indicates
2338 a missing call to h2g_valid. */
b480d9b7
PB
2339#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2340 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2341#endif
2342 assert(start < end);
9fa3e853
FB
2343
2344 start = start & TARGET_PAGE_MASK;
2345 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2346
2347 if (flags & PAGE_WRITE) {
9fa3e853 2348 flags |= PAGE_WRITE_ORG;
376a7909
RH
2349 }
2350
2351 for (addr = start, len = end - start;
2352 len != 0;
2353 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2354 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2355
2356 /* If the write protection bit is set, then we invalidate
2357 the code inside. */
5fafdf24 2358 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2359 (flags & PAGE_WRITE) &&
2360 p->first_tb) {
d720b93d 2361 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2362 }
2363 p->flags = flags;
2364 }
33417e70
FB
2365}
2366
3d97b40b
TS
2367int page_check_range(target_ulong start, target_ulong len, int flags)
2368{
2369 PageDesc *p;
2370 target_ulong end;
2371 target_ulong addr;
2372
376a7909
RH
2373 /* This function should never be called with addresses outside the
2374 guest address space. If this assert fires, it probably indicates
2375 a missing call to h2g_valid. */
338e9e6c
BS
2376#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2377 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2378#endif
2379
3e0650a9
RH
2380 if (len == 0) {
2381 return 0;
2382 }
376a7909
RH
2383 if (start + len - 1 < start) {
2384 /* We've wrapped around. */
55f280c9 2385 return -1;
376a7909 2386 }
55f280c9 2387
3d97b40b
TS
 2388 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2389 start = start & TARGET_PAGE_MASK;
2390
376a7909
RH
2391 for (addr = start, len = end - start;
2392 len != 0;
2393 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2394 p = page_find(addr >> TARGET_PAGE_BITS);
2395 if( !p )
2396 return -1;
2397 if( !(p->flags & PAGE_VALID) )
2398 return -1;
2399
dae3270c 2400 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2401 return -1;
dae3270c
FB
2402 if (flags & PAGE_WRITE) {
2403 if (!(p->flags & PAGE_WRITE_ORG))
2404 return -1;
2405 /* unprotect the page if it was put read-only because it
2406 contains translated code */
2407 if (!(p->flags & PAGE_WRITE)) {
2408 if (!page_unprotect(addr, 0, NULL))
2409 return -1;
2410 }
2411 return 0;
2412 }
3d97b40b
TS
2413 }
2414 return 0;
2415}
2416
9fa3e853 2417/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2418 page. Return TRUE if the fault was successfully handled. */
53a5960a 2419int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2420{
45d679d6
AJ
2421 unsigned int prot;
2422 PageDesc *p;
53a5960a 2423 target_ulong host_start, host_end, addr;
9fa3e853 2424
c8a706fe
PB
2425 /* Technically this isn't safe inside a signal handler. However we
2426 know this only ever happens in a synchronous SEGV handler, so in
2427 practice it seems to be ok. */
2428 mmap_lock();
2429
45d679d6
AJ
2430 p = page_find(address >> TARGET_PAGE_BITS);
2431 if (!p) {
c8a706fe 2432 mmap_unlock();
9fa3e853 2433 return 0;
c8a706fe 2434 }
45d679d6 2435
9fa3e853
FB
2436 /* if the page was really writable, then we change its
2437 protection back to writable */
45d679d6
AJ
2438 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2439 host_start = address & qemu_host_page_mask;
2440 host_end = host_start + qemu_host_page_size;
2441
2442 prot = 0;
2443 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2444 p = page_find(addr >> TARGET_PAGE_BITS);
2445 p->flags |= PAGE_WRITE;
2446 prot |= p->flags;
2447
9fa3e853
FB
2448 /* and since the content will be modified, we must invalidate
2449 the corresponding translated code. */
45d679d6 2450 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2451#ifdef DEBUG_TB_CHECK
45d679d6 2452 tb_invalidate_check(addr);
9fa3e853 2453#endif
9fa3e853 2454 }
45d679d6
AJ
2455 mprotect((void *)g2h(host_start), qemu_host_page_size,
2456 prot & PAGE_BITS);
2457
2458 mmap_unlock();
2459 return 1;
9fa3e853 2460 }
c8a706fe 2461 mmap_unlock();
9fa3e853
FB
2462 return 0;
2463}
2464
6a00d601
FB
2465static inline void tlb_set_dirty(CPUState *env,
2466 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2467{
2468}
9fa3e853
FB
2469#endif /* defined(CONFIG_USER_ONLY) */
2470
e2eef170 2471#if !defined(CONFIG_USER_ONLY)
8da3ff18 2472
c04b2b78
PB
2473#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2474typedef struct subpage_t {
2475 target_phys_addr_t base;
f6405247
RH
2476 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2477 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2478} subpage_t;
2479
c227f099
AL
2480static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2481 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2482static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2483 ram_addr_t orig_memory,
2484 ram_addr_t region_offset);
db7b5426
BS
2485#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2486 need_subpage) \
2487 do { \
2488 if (addr > start_addr) \
2489 start_addr2 = 0; \
2490 else { \
2491 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2492 if (start_addr2 > 0) \
2493 need_subpage = 1; \
2494 } \
2495 \
49e9fba2 2496 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2497 end_addr2 = TARGET_PAGE_SIZE - 1; \
2498 else { \
2499 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2500 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2501 need_subpage = 1; \
2502 } \
2503 } while (0)
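
/* Illustrative sketch, standalone with 4KB pages and made-up addresses: what
   the CHECK_SUBPAGE macro above computes for one page of a region that is not
   page-aligned.  A region 0x1800..0x2fff only touches page 0x1000 from offset
   0x800 onwards, so that page needs subpage dispatch. */
#include <stdio.h>

#define DEMO_PAGE_SIZE 0x1000u
#define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

int main(void)
{
    unsigned addr = 0x1000;          /* page currently being registered */
    unsigned start_addr = 0x1800;    /* region start (unaligned) */
    unsigned orig_size = 0x1800;     /* region covers 0x1800..0x2fff */
    unsigned start_addr2, end_addr2;
    int need_subpage = 0;

    if (addr > start_addr) {
        start_addr2 = 0;
    } else {
        start_addr2 = start_addr & ~DEMO_PAGE_MASK;
        if (start_addr2 > 0) {
            need_subpage = 1;
        }
    }

    if ((start_addr + orig_size) - addr >= DEMO_PAGE_SIZE) {
        end_addr2 = DEMO_PAGE_SIZE - 1;
    } else {
        end_addr2 = (start_addr + orig_size - 1) & ~DEMO_PAGE_MASK;
        if (end_addr2 < DEMO_PAGE_SIZE - 1) {
            need_subpage = 1;
        }
    }

    /* Prints start_addr2=0x800 end_addr2=0xfff need_subpage=1 */
    printf("start_addr2=%#x end_addr2=%#x need_subpage=%d\n",
           start_addr2, end_addr2, need_subpage);
    return 0;
}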
2504
8f2498f9
MT
2505/* register physical memory.
2506 For RAM, 'size' must be a multiple of the target page size.
2507 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2508 io memory page. The address used when calling the IO function is
2509 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2510 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2511 before calculating this offset. This should not be a problem unless
2512 the low bits of start_addr and region_offset differ. */
dd81124b
AK
2513void cpu_register_physical_memory_log(MemoryRegionSection *section,
2514 bool readable, bool readonly)
33417e70 2515{
dd81124b
AK
2516 target_phys_addr_t start_addr = section->offset_within_address_space;
2517 ram_addr_t size = section->size;
2518 ram_addr_t phys_offset = section->mr->ram_addr;
2519 ram_addr_t region_offset = section->offset_within_region;
c227f099 2520 target_phys_addr_t addr, end_addr;
92e873b9 2521 PhysPageDesc *p;
9d42037b 2522 CPUState *env;
c227f099 2523 ram_addr_t orig_size = size;
f6405247 2524 subpage_t *subpage;
33417e70 2525
dd81124b
AK
2526 if (memory_region_is_ram(section->mr)) {
2527 phys_offset += region_offset;
2528 region_offset = 0;
2529 }
2530
2531 if (!readable) {
2532 phys_offset &= ~TARGET_PAGE_MASK & ~IO_MEM_ROMD;
2533 }
2534
2535 if (readonly) {
2536 phys_offset |= io_mem_rom.ram_addr;
2537 }
2538
3b8e6a2d 2539 assert(size);
f6f3fbca 2540
0e0df1e2 2541 if (phys_offset == io_mem_unassigned.ram_addr) {
67c4d23c
PB
2542 region_offset = start_addr;
2543 }
8da3ff18 2544 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2545 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2546 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2547
2548 addr = start_addr;
2549 do {
f1f6e3b8 2550 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
0e0df1e2 2551 if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
c227f099
AL
2552 ram_addr_t orig_memory = p->phys_offset;
2553 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2554 int need_subpage = 0;
2555
2556 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2557 need_subpage);
f6405247 2558 if (need_subpage) {
db7b5426
BS
2559 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2560 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2561 &p->phys_offset, orig_memory,
2562 p->region_offset);
db7b5426
BS
2563 } else {
2564 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2565 >> IO_MEM_SHIFT];
2566 }
8da3ff18
PB
2567 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2568 region_offset);
2569 p->region_offset = 0;
db7b5426
BS
2570 } else {
2571 p->phys_offset = phys_offset;
2774c6d0 2572 p->region_offset = region_offset;
1d393fa2 2573 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2574 phys_offset += TARGET_PAGE_SIZE;
2575 }
2576 } else {
2577 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2578 p->phys_offset = phys_offset;
8da3ff18 2579 p->region_offset = region_offset;
1d393fa2 2580 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2581 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2582 } else {
c227f099 2583 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2584 int need_subpage = 0;
2585
2586 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2587 end_addr2, need_subpage);
2588
f6405247 2589 if (need_subpage) {
db7b5426 2590 subpage = subpage_init((addr & TARGET_PAGE_MASK),
0e0df1e2
AK
2591 &p->phys_offset,
2592 io_mem_unassigned.ram_addr,
67c4d23c 2593 addr & TARGET_PAGE_MASK);
db7b5426 2594 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2595 phys_offset, region_offset);
2596 p->region_offset = 0;
db7b5426
BS
2597 }
2598 }
2599 }
8da3ff18 2600 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2601 addr += TARGET_PAGE_SIZE;
2602 } while (addr != end_addr);
3b46e624 2603
9d42037b
FB
2604 /* since each CPU stores ram addresses in its TLB cache, we must
2605 reset the modified entries */
2606 /* XXX: slow ! */
2607 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2608 tlb_flush(env, 1);
2609 }
33417e70
FB
2610}
2611
c227f099 2612void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2613{
2614 if (kvm_enabled())
2615 kvm_coalesce_mmio_region(addr, size);
2616}
2617
c227f099 2618void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2619{
2620 if (kvm_enabled())
2621 kvm_uncoalesce_mmio_region(addr, size);
2622}
2623
62a2744c
SY
2624void qemu_flush_coalesced_mmio_buffer(void)
2625{
2626 if (kvm_enabled())
2627 kvm_flush_coalesced_mmio_buffer();
2628}
2629
c902760f
MT
2630#if defined(__linux__) && !defined(TARGET_S390X)
2631
2632#include <sys/vfs.h>
2633
2634#define HUGETLBFS_MAGIC 0x958458f6
2635
2636static long gethugepagesize(const char *path)
2637{
2638 struct statfs fs;
2639 int ret;
2640
2641 do {
9742bf26 2642 ret = statfs(path, &fs);
c902760f
MT
2643 } while (ret != 0 && errno == EINTR);
2644
2645 if (ret != 0) {
9742bf26
YT
2646 perror(path);
2647 return 0;
c902760f
MT
2648 }
2649
2650 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2651 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2652
2653 return fs.f_bsize;
2654}
2655
04b16653
AW
2656static void *file_ram_alloc(RAMBlock *block,
2657 ram_addr_t memory,
2658 const char *path)
c902760f
MT
2659{
2660 char *filename;
2661 void *area;
2662 int fd;
2663#ifdef MAP_POPULATE
2664 int flags;
2665#endif
2666 unsigned long hpagesize;
2667
2668 hpagesize = gethugepagesize(path);
2669 if (!hpagesize) {
9742bf26 2670 return NULL;
c902760f
MT
2671 }
2672
2673 if (memory < hpagesize) {
2674 return NULL;
2675 }
2676
2677 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2678 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2679 return NULL;
2680 }
2681
2682 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2683 return NULL;
c902760f
MT
2684 }
2685
2686 fd = mkstemp(filename);
2687 if (fd < 0) {
9742bf26
YT
2688 perror("unable to create backing store for hugepages");
2689 free(filename);
2690 return NULL;
c902760f
MT
2691 }
2692 unlink(filename);
2693 free(filename);
2694
2695 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2696
2697 /*
2698 * ftruncate is not supported by hugetlbfs in older
2699 * hosts, so don't bother bailing out on errors.
2700 * If anything goes wrong with it under other filesystems,
2701 * mmap will fail.
2702 */
2703 if (ftruncate(fd, memory))
9742bf26 2704 perror("ftruncate");
c902760f
MT
2705
2706#ifdef MAP_POPULATE
2707 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2708 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2709 * to sidestep this quirk.
2710 */
2711 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2712 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2713#else
2714 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2715#endif
2716 if (area == MAP_FAILED) {
9742bf26
YT
2717 perror("file_ram_alloc: can't mmap RAM pages");
2718 close(fd);
2719 return (NULL);
c902760f 2720 }
04b16653 2721 block->fd = fd;
c902760f
MT
2722 return area;
2723}
2724#endif
2725
d17b5288 2726static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2727{
2728 RAMBlock *block, *next_block;
3e837b2c 2729 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2730
2731 if (QLIST_EMPTY(&ram_list.blocks))
2732 return 0;
2733
2734 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2735 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2736
2737 end = block->offset + block->length;
2738
2739 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2740 if (next_block->offset >= end) {
2741 next = MIN(next, next_block->offset);
2742 }
2743 }
2744 if (next - end >= size && next - end < mingap) {
3e837b2c 2745 offset = end;
04b16653
AW
2746 mingap = next - end;
2747 }
2748 }
3e837b2c
AW
2749
2750 if (offset == RAM_ADDR_MAX) {
2751 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2752 (uint64_t)size);
2753 abort();
2754 }
2755
04b16653
AW
2756 return offset;
2757}
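
/* Illustrative sketch, standalone with a flat array instead of ram_list: the
   best-fit gap search performed by find_ram_offset().  For every existing
   block's end, find the distance to the nearest following block start and
   keep the smallest gap that still fits the requested size. */
#include <stdio.h>
#include <stdint.h>

struct demo_block { uint64_t offset, length; };

static uint64_t demo_find_offset(const struct demo_block *b, int n,
                                 uint64_t size)
{
    uint64_t best = UINT64_MAX, mingap = UINT64_MAX;
    int i, j;

    if (n == 0) {
        return 0;
    }
    for (i = 0; i < n; i++) {
        uint64_t end = b[i].offset + b[i].length;
        uint64_t next = UINT64_MAX;

        for (j = 0; j < n; j++) {
            if (b[j].offset >= end && b[j].offset < next) {
                next = b[j].offset;
            }
        }
        if (next - end >= size && next - end < mingap) {
            best = end;
            mingap = next - end;
        }
    }
    return best;    /* UINT64_MAX means no suitable gap was found */
}

int main(void)
{
    struct demo_block blocks[] = {
        { 0x00000000, 0x8000000 },   /* 128MB at 0     */
        { 0x10000000, 0x4000000 },   /* 64MB at 256MB  */
    };
    /* A 64MB request fits in the 128MB hole after the first block. */
    printf("offset=%#llx\n",
           (unsigned long long)demo_find_offset(blocks, 2, 0x4000000));
    return 0;
}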
2758
2759static ram_addr_t last_ram_offset(void)
d17b5288
AW
2760{
2761 RAMBlock *block;
2762 ram_addr_t last = 0;
2763
2764 QLIST_FOREACH(block, &ram_list.blocks, next)
2765 last = MAX(last, block->offset + block->length);
2766
2767 return last;
2768}
2769
c5705a77 2770void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2771{
2772 RAMBlock *new_block, *block;
2773
c5705a77
AK
2774 new_block = NULL;
2775 QLIST_FOREACH(block, &ram_list.blocks, next) {
2776 if (block->offset == addr) {
2777 new_block = block;
2778 break;
2779 }
2780 }
2781 assert(new_block);
2782 assert(!new_block->idstr[0]);
84b89d78
CM
2783
2784 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2785 char *id = dev->parent_bus->info->get_dev_path(dev);
2786 if (id) {
2787 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2788 g_free(id);
84b89d78
CM
2789 }
2790 }
2791 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2792
2793 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2794 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2795 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2796 new_block->idstr);
2797 abort();
2798 }
2799 }
c5705a77
AK
2800}
2801
2802ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2803 MemoryRegion *mr)
2804{
2805 RAMBlock *new_block;
2806
2807 size = TARGET_PAGE_ALIGN(size);
2808 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2809
7c637366 2810 new_block->mr = mr;
432d268c 2811 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2812 if (host) {
2813 new_block->host = host;
cd19cfa2 2814 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2815 } else {
2816 if (mem_path) {
c902760f 2817#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2818 new_block->host = file_ram_alloc(new_block, size, mem_path);
2819 if (!new_block->host) {
2820 new_block->host = qemu_vmalloc(size);
e78815a5 2821 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2822 }
c902760f 2823#else
6977dfe6
YT
2824 fprintf(stderr, "-mem-path option unsupported\n");
2825 exit(1);
c902760f 2826#endif
6977dfe6 2827 } else {
6b02494d 2828#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2829 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2830 a system-defined value, which is at least 256GB. Larger systems
2831 have larger values. We put the guest between the end of data
2832 segment (system break) and this value. We use 32GB as a base to
2833 have enough room for the system break to grow. */
2834 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2835 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2836 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2837 if (new_block->host == MAP_FAILED) {
2838 fprintf(stderr, "Allocating RAM failed\n");
2839 abort();
2840 }
6b02494d 2841#else
868bb33f 2842 if (xen_enabled()) {
fce537d4 2843 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2844 } else {
2845 new_block->host = qemu_vmalloc(size);
2846 }
6b02494d 2847#endif
e78815a5 2848 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2849 }
c902760f 2850 }
94a6b54f
PB
2851 new_block->length = size;
2852
f471a17e 2853 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2854
7267c094 2855 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2856 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2857 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2858 0xff, size >> TARGET_PAGE_BITS);
2859
6f0437e8
JK
2860 if (kvm_enabled())
2861 kvm_setup_guest_memory(new_block->host, size);
2862
94a6b54f
PB
2863 return new_block->offset;
2864}
e9a1ab19 2865
c5705a77 2866ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2867{
c5705a77 2868 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2869}
2870
1f2e98b6
AW
2871void qemu_ram_free_from_ptr(ram_addr_t addr)
2872{
2873 RAMBlock *block;
2874
2875 QLIST_FOREACH(block, &ram_list.blocks, next) {
2876 if (addr == block->offset) {
2877 QLIST_REMOVE(block, next);
7267c094 2878 g_free(block);
1f2e98b6
AW
2879 return;
2880 }
2881 }
2882}
2883
c227f099 2884void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2885{
04b16653
AW
2886 RAMBlock *block;
2887
2888 QLIST_FOREACH(block, &ram_list.blocks, next) {
2889 if (addr == block->offset) {
2890 QLIST_REMOVE(block, next);
cd19cfa2
HY
2891 if (block->flags & RAM_PREALLOC_MASK) {
2892 ;
2893 } else if (mem_path) {
04b16653
AW
2894#if defined (__linux__) && !defined(TARGET_S390X)
2895 if (block->fd) {
2896 munmap(block->host, block->length);
2897 close(block->fd);
2898 } else {
2899 qemu_vfree(block->host);
2900 }
fd28aa13
JK
2901#else
2902 abort();
04b16653
AW
2903#endif
2904 } else {
2905#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2906 munmap(block->host, block->length);
2907#else
868bb33f 2908 if (xen_enabled()) {
e41d7c69 2909 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2910 } else {
2911 qemu_vfree(block->host);
2912 }
04b16653
AW
2913#endif
2914 }
7267c094 2915 g_free(block);
04b16653
AW
2916 return;
2917 }
2918 }
2919
e9a1ab19
FB
2920}
2921
cd19cfa2
HY
2922#ifndef _WIN32
2923void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2924{
2925 RAMBlock *block;
2926 ram_addr_t offset;
2927 int flags;
2928 void *area, *vaddr;
2929
2930 QLIST_FOREACH(block, &ram_list.blocks, next) {
2931 offset = addr - block->offset;
2932 if (offset < block->length) {
2933 vaddr = block->host + offset;
2934 if (block->flags & RAM_PREALLOC_MASK) {
2935 ;
2936 } else {
2937 flags = MAP_FIXED;
2938 munmap(vaddr, length);
2939 if (mem_path) {
2940#if defined(__linux__) && !defined(TARGET_S390X)
2941 if (block->fd) {
2942#ifdef MAP_POPULATE
2943 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2944 MAP_PRIVATE;
2945#else
2946 flags |= MAP_PRIVATE;
2947#endif
2948 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2949 flags, block->fd, offset);
2950 } else {
2951 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2952 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2953 flags, -1, 0);
2954 }
fd28aa13
JK
2955#else
2956 abort();
cd19cfa2
HY
2957#endif
2958 } else {
2959#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2960 flags |= MAP_SHARED | MAP_ANONYMOUS;
2961 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2962 flags, -1, 0);
2963#else
2964 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2965 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2966 flags, -1, 0);
2967#endif
2968 }
2969 if (area != vaddr) {
f15fbc4b
AP
2970 fprintf(stderr, "Could not remap addr: "
2971 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2972 length, addr);
2973 exit(1);
2974 }
2975 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2976 }
2977 return;
2978 }
2979 }
2980}
2981#endif /* !_WIN32 */
2982
dc828ca1 2983/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2984 With the exception of the softmmu code in this file, this should
2985 only be used for local memory (e.g. video ram) that the device owns,
2986 and knows it isn't going to access beyond the end of the block.
2987
2988 It should not be used for general purpose DMA.
2989 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2990 */
c227f099 2991void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2992{
94a6b54f
PB
2993 RAMBlock *block;
2994
f471a17e
AW
2995 QLIST_FOREACH(block, &ram_list.blocks, next) {
2996 if (addr - block->offset < block->length) {
7d82af38
VP
 2997 /* Move this entry to the start of the list. */
2998 if (block != QLIST_FIRST(&ram_list.blocks)) {
2999 QLIST_REMOVE(block, next);
3000 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3001 }
868bb33f 3002 if (xen_enabled()) {
432d268c
JN
3003 /* We need to check if the requested address is in the RAM
3004 * because we don't want to map the entire memory in QEMU.
712c2b41 3005 * In that case just map until the end of the page.
432d268c
JN
3006 */
3007 if (block->offset == 0) {
e41d7c69 3008 return xen_map_cache(addr, 0, 0);
432d268c 3009 } else if (block->host == NULL) {
e41d7c69
JK
3010 block->host =
3011 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3012 }
3013 }
f471a17e
AW
3014 return block->host + (addr - block->offset);
3015 }
94a6b54f 3016 }
f471a17e
AW
3017
3018 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3019 abort();
3020
3021 return NULL;
dc828ca1
PB
3022}
3023
b2e0a138
MT
3024/* Return a host pointer to ram allocated with qemu_ram_alloc.
3025 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3026 */
3027void *qemu_safe_ram_ptr(ram_addr_t addr)
3028{
3029 RAMBlock *block;
3030
3031 QLIST_FOREACH(block, &ram_list.blocks, next) {
3032 if (addr - block->offset < block->length) {
868bb33f 3033 if (xen_enabled()) {
432d268c
JN
3034 /* We need to check if the requested address is in the RAM
3035 * because we don't want to map the entire memory in QEMU.
712c2b41 3036 * In that case just map until the end of the page.
432d268c
JN
3037 */
3038 if (block->offset == 0) {
e41d7c69 3039 return xen_map_cache(addr, 0, 0);
432d268c 3040 } else if (block->host == NULL) {
e41d7c69
JK
3041 block->host =
3042 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3043 }
3044 }
b2e0a138
MT
3045 return block->host + (addr - block->offset);
3046 }
3047 }
3048
3049 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3050 abort();
3051
3052 return NULL;
3053}
3054
38bee5dc
SS
3055/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3056 * but takes a size argument */
8ab934f9 3057void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3058{
8ab934f9
SS
3059 if (*size == 0) {
3060 return NULL;
3061 }
868bb33f 3062 if (xen_enabled()) {
e41d7c69 3063 return xen_map_cache(addr, *size, 1);
868bb33f 3064 } else {
38bee5dc
SS
3065 RAMBlock *block;
3066
3067 QLIST_FOREACH(block, &ram_list.blocks, next) {
3068 if (addr - block->offset < block->length) {
3069 if (addr - block->offset + *size > block->length)
3070 *size = block->length - addr + block->offset;
3071 return block->host + (addr - block->offset);
3072 }
3073 }
3074
3075 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3076 abort();
38bee5dc
SS
3077 }
3078}
3079
050a0ddf
AP
3080void qemu_put_ram_ptr(void *addr)
3081{
3082 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3083}
3084
e890261f 3085int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3086{
94a6b54f
PB
3087 RAMBlock *block;
3088 uint8_t *host = ptr;
3089
868bb33f 3090 if (xen_enabled()) {
e41d7c69 3091 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3092 return 0;
3093 }
3094
f471a17e 3095 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3096 /* This case happens when the block is not mapped. */
3097 if (block->host == NULL) {
3098 continue;
3099 }
f471a17e 3100 if (host - block->host < block->length) {
e890261f
MT
3101 *ram_addr = block->offset + (host - block->host);
3102 return 0;
f471a17e 3103 }
94a6b54f 3104 }
432d268c 3105
e890261f
MT
3106 return -1;
3107}
f471a17e 3108
e890261f
MT
3109/* Some of the softmmu routines need to translate from a host pointer
3110 (typically a TLB entry) back to a ram offset. */
3111ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3112{
3113 ram_addr_t ram_addr;
f471a17e 3114
e890261f
MT
3115 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3116 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3117 abort();
3118 }
3119 return ram_addr;
5579c7f3
PB
3120}
3121
0e0df1e2
AK
3122static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3123 unsigned size)
e18231a3
BS
3124{
3125#ifdef DEBUG_UNASSIGNED
3126 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3127#endif
5b450407 3128#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3129 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
e18231a3
BS
3130#endif
3131 return 0;
3132}
3133
0e0df1e2
AK
3134static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3135 uint64_t val, unsigned size)
e18231a3
BS
3136{
3137#ifdef DEBUG_UNASSIGNED
0e0df1e2 3138 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
e18231a3 3139#endif
5b450407 3140#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
0e0df1e2 3141 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
67d3b957 3142#endif
33417e70
FB
3143}
3144
0e0df1e2
AK
3145static const MemoryRegionOps unassigned_mem_ops = {
3146 .read = unassigned_mem_read,
3147 .write = unassigned_mem_write,
3148 .endianness = DEVICE_NATIVE_ENDIAN,
3149};
e18231a3 3150
0e0df1e2
AK
3151static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3152 unsigned size)
e18231a3 3153{
0e0df1e2 3154 abort();
e18231a3
BS
3155}
3156
0e0df1e2
AK
3157static void error_mem_write(void *opaque, target_phys_addr_t addr,
3158 uint64_t value, unsigned size)
e18231a3 3159{
0e0df1e2 3160 abort();
33417e70
FB
3161}
3162
0e0df1e2
AK
3163static const MemoryRegionOps error_mem_ops = {
3164 .read = error_mem_read,
3165 .write = error_mem_write,
3166 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3167};
3168
0e0df1e2
AK
3169static const MemoryRegionOps rom_mem_ops = {
3170 .read = error_mem_read,
3171 .write = unassigned_mem_write,
3172 .endianness = DEVICE_NATIVE_ENDIAN,
33417e70
FB
3173};
3174
0e0df1e2
AK
3175static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3176 uint64_t val, unsigned size)
9fa3e853 3177{
3a7d929e 3178 int dirty_flags;
f7c11b53 3179 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3180 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3181#if !defined(CONFIG_USER_ONLY)
0e0df1e2 3182 tb_invalidate_phys_page_fast(ram_addr, size);
f7c11b53 3183 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3184#endif
3a7d929e 3185 }
0e0df1e2
AK
3186 switch (size) {
3187 case 1:
3188 stb_p(qemu_get_ram_ptr(ram_addr), val);
3189 break;
3190 case 2:
3191 stw_p(qemu_get_ram_ptr(ram_addr), val);
3192 break;
3193 case 4:
3194 stl_p(qemu_get_ram_ptr(ram_addr), val);
3195 break;
3196 default:
3197 abort();
3a7d929e 3198 }
f23db169 3199 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3200 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3201 /* we remove the notdirty callback only if the code has been
3202 flushed */
3203 if (dirty_flags == 0xff)
2e70f6ef 3204 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3205}
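
/* Illustrative sketch, standalone with made-up flag values: the per-page
   dirty byte updated by notdirty_mem_write() above.  A guest write sets every
   dirty bit except CODE_DIRTY_FLAG; once the byte reaches 0xff (dirty for
   every client, and no translated code left to protect) the slow write
   handler no longer needs to trap writes to that page. */
#include <stdio.h>

#define DEMO_VGA_DIRTY_FLAG       0x01
#define DEMO_CODE_DIRTY_FLAG      0x02
#define DEMO_MIGRATION_DIRTY_FLAG 0x08

int main(void)
{
    unsigned char dirty = DEMO_CODE_DIRTY_FLAG;   /* page holds no TBs */

    dirty |= 0xff & ~DEMO_CODE_DIRTY_FLAG;        /* guest wrote to the page */
    printf("dirty=%#x keep_trapping=%d\n", dirty, dirty != 0xff);
    return 0;
}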
3206
0e0df1e2
AK
3207static const MemoryRegionOps notdirty_mem_ops = {
3208 .read = error_mem_read,
3209 .write = notdirty_mem_write,
3210 .endianness = DEVICE_NATIVE_ENDIAN,
1ccde1cb
FB
3211};
3212
0f459d16 3213/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3214static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3215{
3216 CPUState *env = cpu_single_env;
06d55cc1
AL
3217 target_ulong pc, cs_base;
3218 TranslationBlock *tb;
0f459d16 3219 target_ulong vaddr;
a1d1bb31 3220 CPUWatchpoint *wp;
06d55cc1 3221 int cpu_flags;
0f459d16 3222
06d55cc1
AL
3223 if (env->watchpoint_hit) {
3224 /* We re-entered the check after replacing the TB. Now raise
 3225 * the debug interrupt so that it will trigger after the
3226 * current instruction. */
3227 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3228 return;
3229 }
2e70f6ef 3230 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3231 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3232 if ((vaddr == (wp->vaddr & len_mask) ||
3233 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3234 wp->flags |= BP_WATCHPOINT_HIT;
3235 if (!env->watchpoint_hit) {
3236 env->watchpoint_hit = wp;
3237 tb = tb_find_pc(env->mem_io_pc);
3238 if (!tb) {
3239 cpu_abort(env, "check_watchpoint: could not find TB for "
3240 "pc=%p", (void *)env->mem_io_pc);
3241 }
618ba8e6 3242 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3243 tb_phys_invalidate(tb, -1);
3244 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3245 env->exception_index = EXCP_DEBUG;
3246 } else {
3247 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3248 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3249 }
3250 cpu_resume_from_signal(env, NULL);
06d55cc1 3251 }
6e140f28
AL
3252 } else {
3253 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3254 }
3255 }
3256}
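
/* Illustrative sketch, standalone with made-up addresses: the overlap test
   used inside check_watchpoint() above.  'access_mask' clears the low bits of
   the access size (~0x3 for a 4-byte access) and 'wp_mask' clears the low
   bits of the watchpoint length, so the two comparisons together ask whether
   the access range overlaps the watched range. */
#include <stdio.h>
#include <stdint.h>

static int demo_hits(uint64_t access_addr, uint64_t access_mask,
                     uint64_t wp_addr, uint64_t wp_mask)
{
    return access_addr == (wp_addr & access_mask) ||
           (access_addr & wp_mask) == wp_addr;
}

int main(void)
{
    uint64_t wp_addr = 0x1004;              /* watch 4 bytes at 0x1004 */
    uint64_t wp_mask = ~(uint64_t)0x3;

    /* 4-byte access at 0x1004: hit.  4-byte access at 0x1008: miss. */
    printf("%d %d\n",
           demo_hits(0x1004, ~(uint64_t)0x3, wp_addr, wp_mask),
           demo_hits(0x1008, ~(uint64_t)0x3, wp_addr, wp_mask));
    return 0;
}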
3257
6658ffb8
PB
3258/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3259 so these check for a hit then pass through to the normal out-of-line
3260 phys routines. */
c227f099 3261static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3262{
b4051334 3263 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3264 return ldub_phys(addr);
3265}
3266
c227f099 3267static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3268{
b4051334 3269 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3270 return lduw_phys(addr);
3271}
3272
c227f099 3273static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3274{
b4051334 3275 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3276 return ldl_phys(addr);
3277}
3278
c227f099 3279static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3280 uint32_t val)
3281{
b4051334 3282 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3283 stb_phys(addr, val);
3284}
3285
c227f099 3286static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3287 uint32_t val)
3288{
b4051334 3289 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3290 stw_phys(addr, val);
3291}
3292
c227f099 3293static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3294 uint32_t val)
3295{
b4051334 3296 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3297 stl_phys(addr, val);
3298}
3299
d60efc6b 3300static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3301 watch_mem_readb,
3302 watch_mem_readw,
3303 watch_mem_readl,
3304};
3305
d60efc6b 3306static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3307 watch_mem_writeb,
3308 watch_mem_writew,
3309 watch_mem_writel,
3310};
6658ffb8 3311
f6405247
RH
3312static inline uint32_t subpage_readlen (subpage_t *mmio,
3313 target_phys_addr_t addr,
3314 unsigned int len)
db7b5426 3315{
f6405247 3316 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3317#if defined(DEBUG_SUBPAGE)
3318 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3319 mmio, len, addr, idx);
3320#endif
db7b5426 3321
f6405247
RH
3322 addr += mmio->region_offset[idx];
3323 idx = mmio->sub_io_index[idx];
acbbec5d 3324 return io_mem_read(idx, addr, 1 <<len);
db7b5426
BS
3325}
3326
c227f099 3327static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3328 uint32_t value, unsigned int len)
db7b5426 3329{
f6405247 3330 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3331#if defined(DEBUG_SUBPAGE)
f6405247
RH
3332 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3333 __func__, mmio, len, addr, idx, value);
db7b5426 3334#endif
f6405247
RH
3335
3336 addr += mmio->region_offset[idx];
3337 idx = mmio->sub_io_index[idx];
acbbec5d 3338 io_mem_write(idx, addr, value, 1 << len);
db7b5426
BS
3339}
3340
c227f099 3341static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3342{
db7b5426
BS
3343 return subpage_readlen(opaque, addr, 0);
3344}
3345
c227f099 3346static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3347 uint32_t value)
3348{
db7b5426
BS
3349 subpage_writelen(opaque, addr, value, 0);
3350}
3351
c227f099 3352static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3353{
db7b5426
BS
3354 return subpage_readlen(opaque, addr, 1);
3355}
3356
c227f099 3357static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3358 uint32_t value)
3359{
db7b5426
BS
3360 subpage_writelen(opaque, addr, value, 1);
3361}
3362
c227f099 3363static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3364{
db7b5426
BS
3365 return subpage_readlen(opaque, addr, 2);
3366}
3367
f6405247
RH
3368static void subpage_writel (void *opaque, target_phys_addr_t addr,
3369 uint32_t value)
db7b5426 3370{
db7b5426
BS
3371 subpage_writelen(opaque, addr, value, 2);
3372}
3373
d60efc6b 3374static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3375 &subpage_readb,
3376 &subpage_readw,
3377 &subpage_readl,
3378};
3379
d60efc6b 3380static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3381 &subpage_writeb,
3382 &subpage_writew,
3383 &subpage_writel,
3384};
3385
56384e8b
AF
3386static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3387{
3388 ram_addr_t raddr = addr;
3389 void *ptr = qemu_get_ram_ptr(raddr);
3390 return ldub_p(ptr);
3391}
3392
3393static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3394 uint32_t value)
3395{
3396 ram_addr_t raddr = addr;
3397 void *ptr = qemu_get_ram_ptr(raddr);
3398 stb_p(ptr, value);
3399}
3400
3401static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3402{
3403 ram_addr_t raddr = addr;
3404 void *ptr = qemu_get_ram_ptr(raddr);
3405 return lduw_p(ptr);
3406}
3407
3408static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3409 uint32_t value)
3410{
3411 ram_addr_t raddr = addr;
3412 void *ptr = qemu_get_ram_ptr(raddr);
3413 stw_p(ptr, value);
3414}
3415
3416static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3417{
3418 ram_addr_t raddr = addr;
3419 void *ptr = qemu_get_ram_ptr(raddr);
3420 return ldl_p(ptr);
3421}
3422
3423static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3424 uint32_t value)
3425{
3426 ram_addr_t raddr = addr;
3427 void *ptr = qemu_get_ram_ptr(raddr);
3428 stl_p(ptr, value);
3429}
3430
3431static CPUReadMemoryFunc * const subpage_ram_read[] = {
3432 &subpage_ram_readb,
3433 &subpage_ram_readw,
3434 &subpage_ram_readl,
3435};
3436
3437static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3438 &subpage_ram_writeb,
3439 &subpage_ram_writew,
3440 &subpage_ram_writel,
3441};
3442
c227f099
AL
3443static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3444 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3445{
3446 int idx, eidx;
3447
3448 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3449 return -1;
3450 idx = SUBPAGE_IDX(start);
3451 eidx = SUBPAGE_IDX(end);
3452#if defined(DEBUG_SUBPAGE)
0bf9e31a 3453 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3454 mmio, start, end, idx, eidx, memory);
3455#endif
0e0df1e2 3456 if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
56384e8b
AF
3457 memory = IO_MEM_SUBPAGE_RAM;
3458 }
f6405247 3459 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3460 for (; idx <= eidx; idx++) {
f6405247
RH
3461 mmio->sub_io_index[idx] = memory;
3462 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3463 }
3464
3465 return 0;
3466}
3467
f6405247
RH
3468static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3469 ram_addr_t orig_memory,
3470 ram_addr_t region_offset)
db7b5426 3471{
c227f099 3472 subpage_t *mmio;
db7b5426
BS
3473 int subpage_memory;
3474
7267c094 3475 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3476
3477 mmio->base = base;
be675c97 3478 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3479#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3480 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3481 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3482#endif
1eec614b 3483 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3484 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3485
3486 return mmio;
3487}
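subpage_init() initially points every byte of the new page at orig_memory; callers then override byte ranges with subpage_register(). A minimal sketch of that second step, assuming dev_io is a value previously returned by cpu_register_io_memory() and that example_map_small_device() itself is hypothetical:

static void example_map_small_device(subpage_t *mmio, int dev_io,
                                     ram_addr_t dev_region_offset)
{
    /* route the first 0x100 bytes of this guest page to the handlers
       registered under dev_io; the remaining bytes keep whatever
       subpage_init() installed for the whole page */
    subpage_register(mmio, 0x000, 0x0ff, dev_io, dev_region_offset);
}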
3488
88715657
AL
3489static int get_free_io_mem_idx(void)
3490{
3491 int i;
3492
3493 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3494 if (!io_mem_used[i]) {
3495 io_mem_used[i] = 1;
3496 return i;
3497 }
c6703b47 3498 fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
88715657
AL
3499 return -1;
3500}
3501
33417e70
FB
3502/* mem_read and mem_write are arrays of functions used to access the
3503 target in byte (index 0), word (index 1) and dword (index
0b4e6e3e 3504 2) units; all three entries must be non-NULL (see the asserts below).
3ee89922 3505 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3506 modified. If it is zero, a new io zone is allocated. The return
3507 value can be used with cpu_register_physical_memory(). (-1) is
3508 returned on error. */
1eed09cb 3509static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3510 CPUReadMemoryFunc * const *mem_read,
3511 CPUWriteMemoryFunc * const *mem_write,
be675c97 3512 void *opaque)
33417e70 3513{
3cab721d
RH
3514 int i;
3515
33417e70 3516 if (io_index <= 0) {
88715657
AL
3517 io_index = get_free_io_mem_idx();
3518 if (io_index == -1)
3519 return io_index;
33417e70 3520 } else {
1eed09cb 3521 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3522 if (io_index >= IO_MEM_NB_ENTRIES)
3523 return -1;
3524 }
b5ff1b31 3525
3cab721d 3526 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3527 assert(mem_read[i]);
3528 _io_mem_read[io_index][i] = mem_read[i];
3cab721d
RH
3529 }
3530 for (i = 0; i < 3; ++i) {
0e0df1e2
AK
3531 assert(mem_write[i]);
3532 _io_mem_write[io_index][i] = mem_write[i];
3cab721d 3533 }
a4193c8a 3534 io_mem_opaque[io_index] = opaque;
f6405247
RH
3535
3536 return (io_index << IO_MEM_SHIFT);
33417e70 3537}
61382a50 3538
d60efc6b
BS
3539int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3540 CPUWriteMemoryFunc * const *mem_write,
be675c97 3541 void *opaque)
1eed09cb 3542{
be675c97 3543 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3544}
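For reference, a registration sketch for the legacy io-memory API above. MyDevState, the my_dev_* callbacks and the 0x1000 region size are hypothetical; cpu_register_io_memory() comes from this file, and the three-argument cpu_register_physical_memory() form is assumed, as suggested by the comment above.

typedef struct MyDevState {
    uint32_t reg0;
} MyDevState;

static uint32_t my_dev_read(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;
    return addr == 0 ? s->reg0 : 0;      /* one 32-bit register at offset 0 */
}

static void my_dev_write(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    if (addr == 0) {
        s->reg0 = val;
    }
}

static CPUReadMemoryFunc * const my_dev_readfn[3] = {
    my_dev_read, my_dev_read, my_dev_read,        /* byte, word, dword */
};

static CPUWriteMemoryFunc * const my_dev_writefn[3] = {
    my_dev_write, my_dev_write, my_dev_write,
};

static void my_dev_map(MyDevState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(my_dev_readfn, my_dev_writefn, s);

    /* wire the returned index into the physical address space
       (three-argument form assumed: start address, size, phys_offset) */
    cpu_register_physical_memory(base, 0x1000, io);
}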
3545
88715657
AL
3546void cpu_unregister_io_memory(int io_table_address)
3547{
3548 int i;
3549 int io_index = io_table_address >> IO_MEM_SHIFT;
3550
3551 for (i=0;i < 3; i++) {
0e0df1e2
AK
3552 _io_mem_read[io_index][i] = NULL;
3553 _io_mem_write[io_index][i] = NULL;
88715657
AL
3554 }
3555 io_mem_opaque[io_index] = NULL;
3556 io_mem_used[io_index] = 0;
3557}
3558
e9179ce1
AK
3559static void io_mem_init(void)
3560{
3561 int i;
3562
0e0df1e2
AK
3563 /* Must be first: */
3564 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
3565 assert(io_mem_ram.ram_addr == 0);
3566 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3567 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3568 "unassigned", UINT64_MAX);
3569 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3570 "notdirty", UINT64_MAX);
56384e8b 3571 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
be675c97 3572 subpage_ram_write, NULL);
e9179ce1
AK
3573 for (i=0; i<5; i++)
3574 io_mem_used[i] = 1;
3575
3576 io_mem_watch = cpu_register_io_memory(watch_mem_read,
be675c97 3577 watch_mem_write, NULL);
e9179ce1
AK
3578}
3579
62152b8a
AK
3580static void memory_map_init(void)
3581{
7267c094 3582 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3583 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3584 set_system_memory_map(system_memory);
309cb471 3585
7267c094 3586 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3587 memory_region_init(system_io, "io", 65536);
3588 set_system_io_map(system_io);
62152b8a
AK
3589}
3590
3591MemoryRegion *get_system_memory(void)
3592{
3593 return system_memory;
3594}
3595
309cb471
AK
3596MemoryRegion *get_system_io(void)
3597{
3598 return system_io;
3599}
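The same kind of mapping expressed with the MemoryRegion API that io_mem_init() above already uses. The my_mmio_* names, the 4 KiB size and the fixed base are hypothetical; memory_region_init_io() is called exactly as elsewhere in this file, and the MemoryRegionOps callback signatures and memory_region_add_subregion(parent, offset, subregion) are assumed to match the memory API of this period.

static uint64_t my_mmio_read(void *opaque, target_phys_addr_t addr,
                             unsigned size)
{
    return 0;                            /* reads as zero in this sketch */
}

static void my_mmio_write(void *opaque, target_phys_addr_t addr,
                          uint64_t val, unsigned size)
{
    /* writes ignored in this sketch */
}

static const MemoryRegionOps my_mmio_ops = {
    .read = my_mmio_read,
    .write = my_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static MemoryRegion my_mmio_region;

static void my_board_map_mmio(target_phys_addr_t base)
{
    memory_region_init_io(&my_mmio_region, &my_mmio_ops, NULL,
                          "my-mmio", 0x1000);
    memory_region_add_subregion(get_system_memory(), base, &my_mmio_region);
}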
3600
e2eef170
PB
3601#endif /* !defined(CONFIG_USER_ONLY) */
3602
13eb76e0
FB
3603/* physical memory access (slow version, mainly for debug) */
3604#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3605int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3606 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3607{
3608 int l, flags;
3609 target_ulong page;
53a5960a 3610 void * p;
13eb76e0
FB
3611
3612 while (len > 0) {
3613 page = addr & TARGET_PAGE_MASK;
3614 l = (page + TARGET_PAGE_SIZE) - addr;
3615 if (l > len)
3616 l = len;
3617 flags = page_get_flags(page);
3618 if (!(flags & PAGE_VALID))
a68fe89c 3619 return -1;
13eb76e0
FB
3620 if (is_write) {
3621 if (!(flags & PAGE_WRITE))
a68fe89c 3622 return -1;
579a97f7 3623 /* XXX: this code should not depend on lock_user */
72fb7daa 3624 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3625 return -1;
72fb7daa
AJ
3626 memcpy(p, buf, l);
3627 unlock_user(p, addr, l);
13eb76e0
FB
3628 } else {
3629 if (!(flags & PAGE_READ))
a68fe89c 3630 return -1;
579a97f7 3631 /* XXX: this code should not depend on lock_user */
72fb7daa 3632 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3633 return -1;
72fb7daa 3634 memcpy(buf, p, l);
5b257578 3635 unlock_user(p, addr, 0);
13eb76e0
FB
3636 }
3637 len -= l;
3638 buf += l;
3639 addr += l;
3640 }
a68fe89c 3641 return 0;
13eb76e0 3642}
8df1cd07 3643
13eb76e0 3644#else
c227f099 3645void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3646 int len, int is_write)
3647{
3648 int l, io_index;
3649 uint8_t *ptr;
3650 uint32_t val;
c227f099 3651 target_phys_addr_t page;
8ca5692d 3652 ram_addr_t pd;
f1f6e3b8 3653 PhysPageDesc p;
3b46e624 3654
13eb76e0
FB
3655 while (len > 0) {
3656 page = addr & TARGET_PAGE_MASK;
3657 l = (page + TARGET_PAGE_SIZE) - addr;
3658 if (l > len)
3659 l = len;
92e873b9 3660 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3661 pd = p.phys_offset;
3b46e624 3662
13eb76e0 3663 if (is_write) {
0e0df1e2 3664 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
f1f6e3b8 3665 target_phys_addr_t addr1;
13eb76e0 3666 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3667 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3668 /* XXX: could force cpu_single_env to NULL to avoid
3669 potential bugs */
6c2934db 3670 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3671 /* 32 bit write access */
c27004ec 3672 val = ldl_p(buf);
acbbec5d 3673 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3674 l = 4;
6c2934db 3675 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3676 /* 16 bit write access */
c27004ec 3677 val = lduw_p(buf);
acbbec5d 3678 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3679 l = 2;
3680 } else {
1c213d19 3681 /* 8 bit write access */
c27004ec 3682 val = ldub_p(buf);
acbbec5d 3683 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3684 l = 1;
3685 }
3686 } else {
8ca5692d 3687 ram_addr_t addr1;
b448f2f3 3688 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3689 /* RAM case */
5579c7f3 3690 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3691 memcpy(ptr, buf, l);
3a7d929e
FB
3692 if (!cpu_physical_memory_is_dirty(addr1)) {
3693 /* invalidate code */
3694 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3695 /* set dirty bit */
f7c11b53
YT
3696 cpu_physical_memory_set_dirty_flags(
3697 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3698 }
050a0ddf 3699 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3700 }
3701 } else {
1d393fa2 3702 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3703 target_phys_addr_t addr1;
13eb76e0
FB
3704 /* I/O case */
3705 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3706 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3707 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3708 /* 32 bit read access */
acbbec5d 3709 val = io_mem_read(io_index, addr1, 4);
c27004ec 3710 stl_p(buf, val);
13eb76e0 3711 l = 4;
6c2934db 3712 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3713 /* 16 bit read access */
acbbec5d 3714 val = io_mem_read(io_index, addr1, 2);
c27004ec 3715 stw_p(buf, val);
13eb76e0
FB
3716 l = 2;
3717 } else {
1c213d19 3718 /* 8 bit read access */
acbbec5d 3719 val = io_mem_read(io_index, addr1, 1);
c27004ec 3720 stb_p(buf, val);
13eb76e0
FB
3721 l = 1;
3722 }
3723 } else {
3724 /* RAM case */
050a0ddf
AP
3725 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3726 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3727 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3728 }
3729 }
3730 len -= l;
3731 buf += l;
3732 addr += l;
3733 }
3734}
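A usage sketch for the slow path above: device emulation commonly pulls a small control structure out of guest memory and writes a status byte back. The descriptor layout and the my_* names are hypothetical; cpu_physical_memory_read()/write() are the thin wrappers around cpu_physical_memory_rw() used elsewhere in this file.

struct my_desc {                         /* hypothetical DMA descriptor */
    uint32_t src, dst, len;
};

static void my_process_descriptor(target_phys_addr_t desc_pa)
{
    struct my_desc d;
    uint8_t status = 1;                  /* "done" in this sketch */

    cpu_physical_memory_read(desc_pa, (uint8_t *)&d, sizeof(d));
    /* ... act on d.src, d.dst and d.len ... */
    cpu_physical_memory_write(desc_pa + sizeof(d), &status, 1);
}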
8df1cd07 3735
d0ecd2aa 3736/* used for ROM loading : can write in RAM and ROM */
c227f099 3737void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3738 const uint8_t *buf, int len)
3739{
3740 int l;
3741 uint8_t *ptr;
c227f099 3742 target_phys_addr_t page;
d0ecd2aa 3743 unsigned long pd;
f1f6e3b8 3744 PhysPageDesc p;
3b46e624 3745
d0ecd2aa
FB
3746 while (len > 0) {
3747 page = addr & TARGET_PAGE_MASK;
3748 l = (page + TARGET_PAGE_SIZE) - addr;
3749 if (l > len)
3750 l = len;
3751 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3752 pd = p.phys_offset;
3b46e624 3753
1d393fa2 3754 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3755 /* do nothing */
3756 } else {
3757 unsigned long addr1;
3758 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3759 /* ROM/RAM case */
5579c7f3 3760 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3761 memcpy(ptr, buf, l);
050a0ddf 3762 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3763 }
3764 len -= l;
3765 buf += l;
3766 addr += l;
3767 }
3768}
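A loader-style sketch: a plain cpu_physical_memory_write() to a page registered as ROM is routed to the ROM handlers and discarded, so firmware images go through the helper above instead. The base address and names are hypothetical.

static void my_load_firmware(const uint8_t *image, int size)
{
    const target_phys_addr_t base = 0xfffc0000;   /* hypothetical ROM base */

    cpu_physical_memory_write_rom(base, image, size);
}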
3769
6d16c2f8
AL
3770typedef struct {
3771 void *buffer;
c227f099
AL
3772 target_phys_addr_t addr;
3773 target_phys_addr_t len;
6d16c2f8
AL
3774} BounceBuffer;
3775
3776static BounceBuffer bounce;
3777
ba223c29
AL
3778typedef struct MapClient {
3779 void *opaque;
3780 void (*callback)(void *opaque);
72cf2d4f 3781 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3782} MapClient;
3783
72cf2d4f
BS
3784static QLIST_HEAD(map_client_list, MapClient) map_client_list
3785 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3786
3787void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3788{
7267c094 3789 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3790
3791 client->opaque = opaque;
3792 client->callback = callback;
72cf2d4f 3793 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3794 return client;
3795}
3796
3797void cpu_unregister_map_client(void *_client)
3798{
3799 MapClient *client = (MapClient *)_client;
3800
72cf2d4f 3801 QLIST_REMOVE(client, link);
7267c094 3802 g_free(client);
ba223c29
AL
3803}
3804
3805static void cpu_notify_map_clients(void)
3806{
3807 MapClient *client;
3808
72cf2d4f
BS
3809 while (!QLIST_EMPTY(&map_client_list)) {
3810 client = QLIST_FIRST(&map_client_list);
ba223c29 3811 client->callback(client->opaque);
34d5e948 3812 cpu_unregister_map_client(client);
ba223c29
AL
3813 }
3814}
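A sketch of how callers are expected to use the map-client list: when cpu_physical_memory_map() below fails, the request parks a callback here and is re-driven from cpu_notify_map_clients() once the bounce buffer is released (the client is unregistered automatically after its callback runs). my_retry_transfer() and the request object are hypothetical.

static void my_retry_transfer(void *opaque)
{
    /* opaque identifies the stalled request: re-issue its
       cpu_physical_memory_map() attempt from here */
}

static void my_defer_transfer(void *request)
{
    cpu_register_map_client(request, my_retry_transfer);
}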
3815
6d16c2f8
AL
3816/* Map a physical memory region into a host virtual address.
3817 * May map a subset of the requested range, given by and returned in *plen.
3818 * May return NULL if resources needed to perform the mapping are exhausted.
3819 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3820 * Use cpu_register_map_client() to know when retrying the map operation is
3821 * likely to succeed.
6d16c2f8 3822 */
c227f099
AL
3823void *cpu_physical_memory_map(target_phys_addr_t addr,
3824 target_phys_addr_t *plen,
6d16c2f8
AL
3825 int is_write)
3826{
c227f099 3827 target_phys_addr_t len = *plen;
38bee5dc 3828 target_phys_addr_t todo = 0;
6d16c2f8 3829 int l;
c227f099 3830 target_phys_addr_t page;
6d16c2f8 3831 unsigned long pd;
f1f6e3b8 3832 PhysPageDesc p;
f15fbc4b 3833 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3834 ram_addr_t rlen;
3835 void *ret;
6d16c2f8
AL
3836
3837 while (len > 0) {
3838 page = addr & TARGET_PAGE_MASK;
3839 l = (page + TARGET_PAGE_SIZE) - addr;
3840 if (l > len)
3841 l = len;
3842 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3843 pd = p.phys_offset;
6d16c2f8 3844
0e0df1e2 3845 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
38bee5dc 3846 if (todo || bounce.buffer) {
6d16c2f8
AL
3847 break;
3848 }
3849 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3850 bounce.addr = addr;
3851 bounce.len = l;
3852 if (!is_write) {
54f7b4a3 3853 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3854 }
38bee5dc
SS
3855
3856 *plen = l;
3857 return bounce.buffer;
6d16c2f8 3858 }
8ab934f9
SS
3859 if (!todo) {
3860 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3861 }
6d16c2f8
AL
3862
3863 len -= l;
3864 addr += l;
38bee5dc 3865 todo += l;
6d16c2f8 3866 }
8ab934f9
SS
3867 rlen = todo;
3868 ret = qemu_ram_ptr_length(raddr, &rlen);
3869 *plen = rlen;
3870 return ret;
6d16c2f8
AL
3871}
3872
3873/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3874 * Will also mark the memory as dirty if is_write == 1. access_len gives
3875 * the amount of memory that was actually read or written by the caller.
3876 */
c227f099
AL
3877void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3878 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3879{
3880 if (buffer != bounce.buffer) {
3881 if (is_write) {
e890261f 3882 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3883 while (access_len) {
3884 unsigned l;
3885 l = TARGET_PAGE_SIZE;
3886 if (l > access_len)
3887 l = access_len;
3888 if (!cpu_physical_memory_is_dirty(addr1)) {
3889 /* invalidate code */
3890 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3891 /* set dirty bit */
f7c11b53
YT
3892 cpu_physical_memory_set_dirty_flags(
3893 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3894 }
3895 addr1 += l;
3896 access_len -= l;
3897 }
3898 }
868bb33f 3899 if (xen_enabled()) {
e41d7c69 3900 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3901 }
6d16c2f8
AL
3902 return;
3903 }
3904 if (is_write) {
3905 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3906 }
f8a83245 3907 qemu_vfree(bounce.buffer);
6d16c2f8 3908 bounce.buffer = NULL;
ba223c29 3909 cpu_notify_map_clients();
6d16c2f8 3910}
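The intended zero-copy pattern for the two functions above, as a minimal sketch: map, touch the returned host pointer directly, then unmap with the byte count actually transferred so dirty tracking stays correct. The my_* names are hypothetical.

static void my_copy_to_guest(target_phys_addr_t gpa, const uint8_t *data,
                             target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(gpa, &plen, 1 /* is_write */);

        if (!host) {
            /* bounce buffer busy: a real caller would queue itself with
               cpu_register_map_client() and retry later */
            break;
        }
        memcpy(host, data, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        gpa += plen;
        data += plen;
        len -= plen;
    }
}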
d0ecd2aa 3911
8df1cd07 3912/* warning: addr must be aligned */
1e78bcc1
AG
3913static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3914 enum device_endian endian)
8df1cd07
FB
3915{
3916 int io_index;
3917 uint8_t *ptr;
3918 uint32_t val;
3919 unsigned long pd;
f1f6e3b8 3920 PhysPageDesc p;
8df1cd07
FB
3921
3922 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3923 pd = p.phys_offset;
3b46e624 3924
1d393fa2 3925 if (!is_ram_rom_romd(pd)) {
8df1cd07
FB
3926 /* I/O case */
3927 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3928 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3929 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3930#if defined(TARGET_WORDS_BIGENDIAN)
3931 if (endian == DEVICE_LITTLE_ENDIAN) {
3932 val = bswap32(val);
3933 }
3934#else
3935 if (endian == DEVICE_BIG_ENDIAN) {
3936 val = bswap32(val);
3937 }
3938#endif
8df1cd07
FB
3939 } else {
3940 /* RAM case */
5579c7f3 3941 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3942 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3943 switch (endian) {
3944 case DEVICE_LITTLE_ENDIAN:
3945 val = ldl_le_p(ptr);
3946 break;
3947 case DEVICE_BIG_ENDIAN:
3948 val = ldl_be_p(ptr);
3949 break;
3950 default:
3951 val = ldl_p(ptr);
3952 break;
3953 }
8df1cd07
FB
3954 }
3955 return val;
3956}
3957
1e78bcc1
AG
3958uint32_t ldl_phys(target_phys_addr_t addr)
3959{
3960 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3961}
3962
3963uint32_t ldl_le_phys(target_phys_addr_t addr)
3964{
3965 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3966}
3967
3968uint32_t ldl_be_phys(target_phys_addr_t addr)
3969{
3970 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3971}
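ldl_phys() follows the target's native byte order, while the _le/_be variants pin the order regardless of the guest CPU. A one-line sketch for a device whose registers are specified as little-endian (names hypothetical):

static uint32_t my_read_le_reg(target_phys_addr_t base, int reg)
{
    return ldl_le_phys(base + reg * 4);  /* 32-bit little-endian registers */
}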
3972
84b7b8e7 3973/* warning: addr must be aligned */
1e78bcc1
AG
3974static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3975 enum device_endian endian)
84b7b8e7
FB
3976{
3977 int io_index;
3978 uint8_t *ptr;
3979 uint64_t val;
3980 unsigned long pd;
f1f6e3b8 3981 PhysPageDesc p;
84b7b8e7
FB
3982
3983 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3984 pd = p.phys_offset;
3b46e624 3985
1d393fa2 3986 if (!is_ram_rom_romd(pd)) {
84b7b8e7
FB
3987 /* I/O case */
3988 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3989 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
3990
3991 /* XXX This is broken when device endian != cpu endian.
3992 Fix and add "endian" variable check */
84b7b8e7 3993#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
3994 val = io_mem_read(io_index, addr, 4) << 32;
3995 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 3996#else
acbbec5d
AK
3997 val = io_mem_read(io_index, addr, 4);
3998 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
3999#endif
4000 } else {
4001 /* RAM case */
5579c7f3 4002 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4003 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4004 switch (endian) {
4005 case DEVICE_LITTLE_ENDIAN:
4006 val = ldq_le_p(ptr);
4007 break;
4008 case DEVICE_BIG_ENDIAN:
4009 val = ldq_be_p(ptr);
4010 break;
4011 default:
4012 val = ldq_p(ptr);
4013 break;
4014 }
84b7b8e7
FB
4015 }
4016 return val;
4017}
4018
1e78bcc1
AG
4019uint64_t ldq_phys(target_phys_addr_t addr)
4020{
4021 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4022}
4023
4024uint64_t ldq_le_phys(target_phys_addr_t addr)
4025{
4026 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4027}
4028
4029uint64_t ldq_be_phys(target_phys_addr_t addr)
4030{
4031 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4032}
4033
aab33094 4034/* XXX: optimize */
c227f099 4035uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4036{
4037 uint8_t val;
4038 cpu_physical_memory_read(addr, &val, 1);
4039 return val;
4040}
4041
733f0b02 4042/* warning: addr must be aligned */
1e78bcc1
AG
4043static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4044 enum device_endian endian)
aab33094 4045{
733f0b02
MT
4046 int io_index;
4047 uint8_t *ptr;
4048 uint64_t val;
4049 unsigned long pd;
f1f6e3b8 4050 PhysPageDesc p;
733f0b02
MT
4051
4052 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4053 pd = p.phys_offset;
733f0b02 4054
1d393fa2 4055 if (!is_ram_rom_romd(pd)) {
733f0b02
MT
4056 /* I/O case */
4057 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4058 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4059 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
4060#if defined(TARGET_WORDS_BIGENDIAN)
4061 if (endian == DEVICE_LITTLE_ENDIAN) {
4062 val = bswap16(val);
4063 }
4064#else
4065 if (endian == DEVICE_BIG_ENDIAN) {
4066 val = bswap16(val);
4067 }
4068#endif
733f0b02
MT
4069 } else {
4070 /* RAM case */
4071 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4072 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4073 switch (endian) {
4074 case DEVICE_LITTLE_ENDIAN:
4075 val = lduw_le_p(ptr);
4076 break;
4077 case DEVICE_BIG_ENDIAN:
4078 val = lduw_be_p(ptr);
4079 break;
4080 default:
4081 val = lduw_p(ptr);
4082 break;
4083 }
733f0b02
MT
4084 }
4085 return val;
aab33094
FB
4086}
4087
1e78bcc1
AG
4088uint32_t lduw_phys(target_phys_addr_t addr)
4089{
4090 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4091}
4092
4093uint32_t lduw_le_phys(target_phys_addr_t addr)
4094{
4095 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4096}
4097
4098uint32_t lduw_be_phys(target_phys_addr_t addr)
4099{
4100 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4101}
4102
8df1cd07
FB
4103/* warning: addr must be aligned. The ram page is not masked as dirty
4104 and the code inside is not invalidated. It is useful if the dirty
4105 bits are used to track modified PTEs */
c227f099 4106void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4107{
4108 int io_index;
4109 uint8_t *ptr;
4110 unsigned long pd;
f1f6e3b8 4111 PhysPageDesc p;
8df1cd07
FB
4112
4113 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4114 pd = p.phys_offset;
3b46e624 4115
0e0df1e2 4116 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4117 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4118 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4119 io_mem_write(io_index, addr, val, 4);
8df1cd07 4120 } else {
74576198 4121 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4122 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4123 stl_p(ptr, val);
74576198
AL
4124
4125 if (unlikely(in_migration)) {
4126 if (!cpu_physical_memory_is_dirty(addr1)) {
4127 /* invalidate code */
4128 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4129 /* set dirty bit */
f7c11b53
YT
4130 cpu_physical_memory_set_dirty_flags(
4131 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4132 }
4133 }
8df1cd07
FB
4134 }
4135}
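A sketch of the PTE case the comment above refers to: when MMU emulation sets an accessed/dirty bit in a guest page-table entry, the store must not mark the RAM page dirty, or the emulator's own bookkeeping would look like a guest write. The 32-bit PTE layout and the bit value are hypothetical.

#define MY_PTE_ACCESSED 0x20             /* hypothetical accessed bit */

static void my_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & MY_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | MY_PTE_ACCESSED);
    }
}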
4136
c227f099 4137void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4138{
4139 int io_index;
4140 uint8_t *ptr;
4141 unsigned long pd;
f1f6e3b8 4142 PhysPageDesc p;
bc98a7ef
JM
4143
4144 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4145 pd = p.phys_offset;
3b46e624 4146
0e0df1e2 4147 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
bc98a7ef 4148 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4149 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4150#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4151 io_mem_write(io_index, addr, val >> 32, 4);
4152 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4153#else
acbbec5d
AK
4154 io_mem_write(io_index, addr, (uint32_t)val, 4);
4155 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4156#endif
4157 } else {
5579c7f3 4158 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4159 (addr & ~TARGET_PAGE_MASK);
4160 stq_p(ptr, val);
4161 }
4162}
4163
8df1cd07 4164/* warning: addr must be aligned */
1e78bcc1
AG
4165static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4166 enum device_endian endian)
8df1cd07
FB
4167{
4168 int io_index;
4169 uint8_t *ptr;
4170 unsigned long pd;
f1f6e3b8 4171 PhysPageDesc p;
8df1cd07
FB
4172
4173 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4174 pd = p.phys_offset;
3b46e624 4175
0e0df1e2 4176 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
8df1cd07 4177 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4178 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4179#if defined(TARGET_WORDS_BIGENDIAN)
4180 if (endian == DEVICE_LITTLE_ENDIAN) {
4181 val = bswap32(val);
4182 }
4183#else
4184 if (endian == DEVICE_BIG_ENDIAN) {
4185 val = bswap32(val);
4186 }
4187#endif
acbbec5d 4188 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4189 } else {
4190 unsigned long addr1;
4191 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4192 /* RAM case */
5579c7f3 4193 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4194 switch (endian) {
4195 case DEVICE_LITTLE_ENDIAN:
4196 stl_le_p(ptr, val);
4197 break;
4198 case DEVICE_BIG_ENDIAN:
4199 stl_be_p(ptr, val);
4200 break;
4201 default:
4202 stl_p(ptr, val);
4203 break;
4204 }
3a7d929e
FB
4205 if (!cpu_physical_memory_is_dirty(addr1)) {
4206 /* invalidate code */
4207 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4208 /* set dirty bit */
f7c11b53
YT
4209 cpu_physical_memory_set_dirty_flags(addr1,
4210 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4211 }
8df1cd07
FB
4212 }
4213}
4214
1e78bcc1
AG
4215void stl_phys(target_phys_addr_t addr, uint32_t val)
4216{
4217 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4218}
4219
4220void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4221{
4222 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4223}
4224
4225void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4226{
4227 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4228}
4229
aab33094 4230/* XXX: optimize */
c227f099 4231void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4232{
4233 uint8_t v = val;
4234 cpu_physical_memory_write(addr, &v, 1);
4235}
4236
733f0b02 4237/* warning: addr must be aligned */
1e78bcc1
AG
4238static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4239 enum device_endian endian)
aab33094 4240{
733f0b02
MT
4241 int io_index;
4242 uint8_t *ptr;
4243 unsigned long pd;
f1f6e3b8 4244 PhysPageDesc p;
733f0b02
MT
4245
4246 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4247 pd = p.phys_offset;
733f0b02 4248
0e0df1e2 4249 if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
733f0b02 4250 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4251 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4252#if defined(TARGET_WORDS_BIGENDIAN)
4253 if (endian == DEVICE_LITTLE_ENDIAN) {
4254 val = bswap16(val);
4255 }
4256#else
4257 if (endian == DEVICE_BIG_ENDIAN) {
4258 val = bswap16(val);
4259 }
4260#endif
acbbec5d 4261 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4262 } else {
4263 unsigned long addr1;
4264 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4265 /* RAM case */
4266 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4267 switch (endian) {
4268 case DEVICE_LITTLE_ENDIAN:
4269 stw_le_p(ptr, val);
4270 break;
4271 case DEVICE_BIG_ENDIAN:
4272 stw_be_p(ptr, val);
4273 break;
4274 default:
4275 stw_p(ptr, val);
4276 break;
4277 }
733f0b02
MT
4278 if (!cpu_physical_memory_is_dirty(addr1)) {
4279 /* invalidate code */
4280 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4281 /* set dirty bit */
4282 cpu_physical_memory_set_dirty_flags(addr1,
4283 (0xff & ~CODE_DIRTY_FLAG));
4284 }
4285 }
aab33094
FB
4286}
4287
1e78bcc1
AG
4288void stw_phys(target_phys_addr_t addr, uint32_t val)
4289{
4290 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4291}
4292
4293void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4294{
4295 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4296}
4297
4298void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4299{
4300 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4301}
4302
aab33094 4303/* XXX: optimize */
c227f099 4304void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4305{
4306 val = tswap64(val);
71d2b725 4307 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4308}
4309
1e78bcc1
AG
4310void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4311{
4312 val = cpu_to_le64(val);
4313 cpu_physical_memory_write(addr, &val, 8);
4314}
4315
4316void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4317{
4318 val = cpu_to_be64(val);
4319 cpu_physical_memory_write(addr, &val, 8);
4320}
4321
5e2972fd 4322/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4323int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4324 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4325{
4326 int l;
c227f099 4327 target_phys_addr_t phys_addr;
9b3c35e0 4328 target_ulong page;
13eb76e0
FB
4329
4330 while (len > 0) {
4331 page = addr & TARGET_PAGE_MASK;
4332 phys_addr = cpu_get_phys_page_debug(env, page);
4333 /* if no physical page mapped, return an error */
4334 if (phys_addr == -1)
4335 return -1;
4336 l = (page + TARGET_PAGE_SIZE) - addr;
4337 if (l > len)
4338 l = len;
5e2972fd 4339 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4340 if (is_write)
4341 cpu_physical_memory_write_rom(phys_addr, buf, l);
4342 else
5e2972fd 4343 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4344 len -= l;
4345 buf += l;
4346 addr += l;
4347 }
4348 return 0;
4349}
a68fe89c 4350#endif
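Both variants of cpu_memory_rw_debug() above (user-only and softmmu) are what a debugger stub uses to peek at guest virtual memory. A minimal sketch, with the helper name hypothetical and the bytes returned exactly as they appear in guest memory:

static uint32_t my_debug_peek_u32(CPUState *env, target_ulong vaddr)
{
    uint32_t val = 0;

    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&val, sizeof(val), 0) < 0) {
        return 0;                        /* page not mapped */
    }
    return val;
}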
13eb76e0 4351
2e70f6ef
PB
4352/* in deterministic execution mode, instructions doing device I/Os
4353 must be at the end of the TB */
4354void cpu_io_recompile(CPUState *env, void *retaddr)
4355{
4356 TranslationBlock *tb;
4357 uint32_t n, cflags;
4358 target_ulong pc, cs_base;
4359 uint64_t flags;
4360
4361 tb = tb_find_pc((unsigned long)retaddr);
4362 if (!tb) {
4363 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4364 retaddr);
4365 }
4366 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4367 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4368 /* Calculate how many instructions had been executed before the fault
bf20dc07 4369 occurred. */
2e70f6ef
PB
4370 n = n - env->icount_decr.u16.low;
4371 /* Generate a new TB ending on the I/O insn. */
4372 n++;
4373 /* On MIPS and SH, delay slot instructions can only be restarted if
4374 they were already the first instruction in the TB. If this is not
bf20dc07 4375 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4376 branch. */
4377#if defined(TARGET_MIPS)
4378 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4379 env->active_tc.PC -= 4;
4380 env->icount_decr.u16.low++;
4381 env->hflags &= ~MIPS_HFLAG_BMASK;
4382 }
4383#elif defined(TARGET_SH4)
4384 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4385 && n > 1) {
4386 env->pc -= 2;
4387 env->icount_decr.u16.low++;
4388 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4389 }
4390#endif
4391 /* This should never happen. */
4392 if (n > CF_COUNT_MASK)
4393 cpu_abort(env, "TB too big during recompile");
4394
4395 cflags = n | CF_LAST_IO;
4396 pc = tb->pc;
4397 cs_base = tb->cs_base;
4398 flags = tb->flags;
4399 tb_phys_invalidate(tb, -1);
4400 /* FIXME: In theory this could raise an exception. In practice
4401 we have already translated the block once so it's probably ok. */
4402 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4403 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4404 the first in the TB) then we end up generating a whole new TB and
4405 repeating the fault, which is horribly inefficient.
4406 Better would be to execute just this insn uncached, or generate a
4407 second new TB. */
4408 cpu_resume_from_signal(env, NULL);
4409}
4410
b3755a91
PB
4411#if !defined(CONFIG_USER_ONLY)
4412
055403b2 4413void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4414{
4415 int i, target_code_size, max_target_code_size;
4416 int direct_jmp_count, direct_jmp2_count, cross_page;
4417 TranslationBlock *tb;
3b46e624 4418
e3db7226
FB
4419 target_code_size = 0;
4420 max_target_code_size = 0;
4421 cross_page = 0;
4422 direct_jmp_count = 0;
4423 direct_jmp2_count = 0;
4424 for(i = 0; i < nb_tbs; i++) {
4425 tb = &tbs[i];
4426 target_code_size += tb->size;
4427 if (tb->size > max_target_code_size)
4428 max_target_code_size = tb->size;
4429 if (tb->page_addr[1] != -1)
4430 cross_page++;
4431 if (tb->tb_next_offset[0] != 0xffff) {
4432 direct_jmp_count++;
4433 if (tb->tb_next_offset[1] != 0xffff) {
4434 direct_jmp2_count++;
4435 }
4436 }
4437 }
4438 /* XXX: avoid using doubles ? */
57fec1fe 4439 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4440 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4441 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4442 cpu_fprintf(f, "TB count %d/%d\n",
4443 nb_tbs, code_gen_max_blocks);
5fafdf24 4444 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4445 nb_tbs ? target_code_size / nb_tbs : 0,
4446 max_target_code_size);
055403b2 4447 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4448 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4449 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4450 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4451 cross_page,
e3db7226
FB
4452 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4453 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4454 direct_jmp_count,
e3db7226
FB
4455 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4456 direct_jmp2_count,
4457 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4458 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4459 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4460 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4461 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4462 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4463}
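dump_exec_info() takes any fprintf-compatible callback (the monitor's "info jit" command is the usual caller), so routing it to stderr is enough for ad-hoc TCG statistics; the wrapper name below is hypothetical.

static void my_dump_tcg_stats(void)
{
    dump_exec_info(stderr, fprintf);
}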
4464
d39e8222
AK
4465/* NOTE: this function can trigger an exception */
4466/* NOTE2: the returned address is not exactly the physical address: it
4467 is the offset relative to phys_ram_base */
4468tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
4469{
4470 int mmu_idx, page_index, pd;
4471 void *p;
4472
4473 page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
4474 mmu_idx = cpu_mmu_index(env1);
4475 if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
4476 (addr & TARGET_PAGE_MASK))) {
4477 ldub_code(addr);
4478 }
4479 pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
0e0df1e2
AK
4480 if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
4481 && !(pd & IO_MEM_ROMD)) {
d39e8222
AK
4482#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
4483 cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
4484#else
4485 cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
4486#endif
4487 }
4488 p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
4489 return qemu_ram_addr_from_host_nofail(p);
4490}
4491
61382a50 4492#define MMUSUFFIX _cmmu
3917149d 4493#undef GETPC
61382a50
FB
4494#define GETPC() NULL
4495#define env cpu_single_env
b769d8fe 4496#define SOFTMMU_CODE_ACCESS
61382a50
FB
4497
4498#define SHIFT 0
4499#include "softmmu_template.h"
4500
4501#define SHIFT 1
4502#include "softmmu_template.h"
4503
4504#define SHIFT 2
4505#include "softmmu_template.h"
4506
4507#define SHIFT 3
4508#include "softmmu_template.h"
4509
4510#undef env
4511
4512#endif