54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
36#include "memory.h"
37#include "exec-memory.h"
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
60#define WANT_EXEC_OBSOLETE
61#include "exec-obsolete.h"
62
fd6ce8f6 63//#define DEBUG_TB_INVALIDATE
66e85a21 64//#define DEBUG_FLUSH
9fa3e853 65//#define DEBUG_TLB
67d3b957 66//#define DEBUG_UNASSIGNED
67
68/* make various TB consistency checks */
69//#define DEBUG_TB_CHECK
70//#define DEBUG_TLB_CHECK
fd6ce8f6 71
1196be37 72//#define DEBUG_IOPORT
db7b5426 73//#define DEBUG_SUBPAGE
1196be37 74
75#if !defined(CONFIG_USER_ONLY)
76/* TB consistency checks only implemented for usermode emulation. */
77#undef DEBUG_TB_CHECK
78#endif
79
80#define SMC_BITMAP_USE_THRESHOLD 10
81
bdaf78e0 82static TranslationBlock *tbs;
24ab68ac 83static int code_gen_max_blocks;
9fa3e853 84TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 85static int nb_tbs;
eb51d102 86/* any access to the tbs or the page table must use this lock */
c227f099 87spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 88
89#if defined(__arm__) || defined(__sparc_v9__)
90/* The prologue must be reachable with a direct jump. ARM and Sparc64
91 have limited branch ranges (possibly also PPC) so place it in a
92 section close to code segment. */
93#define code_gen_section \
94 __attribute__((__section__(".gen_code"))) \
95 __attribute__((aligned (32)))
96#elif defined(_WIN32)
97/* Maximum alignment for Win32 is 16. */
98#define code_gen_section \
99 __attribute__((aligned (16)))
100#else
101#define code_gen_section \
102 __attribute__((aligned (32)))
103#endif
104
105uint8_t code_gen_prologue[1024] code_gen_section;
106static uint8_t *code_gen_buffer;
107static unsigned long code_gen_buffer_size;
26a5f13b 108/* threshold to flush the translated code buffer */
bdaf78e0 109static unsigned long code_gen_buffer_max_size;
24ab68ac 110static uint8_t *code_gen_ptr;
fd6ce8f6 111
e2eef170 112#if !defined(CONFIG_USER_ONLY)
9fa3e853 113int phys_ram_fd;
74576198 114static int in_migration;
94a6b54f 115
85d59fef 116RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
117
118static MemoryRegion *system_memory;
309cb471 119static MemoryRegion *system_io;
62152b8a 120
e2eef170 121#endif
9fa3e853 122
123CPUState *first_cpu;
124/* current CPU in the current thread. It is only valid inside
125 cpu_exec() */
b3c4bbe5 126DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 127/* 0 = Do not count executed instructions.
bf20dc07 128 1 = Precise instruction counting.
129 2 = Adaptive rate instruction counting. */
130int use_icount = 0;
6a00d601 131
54936004 132typedef struct PageDesc {
92e873b9 133 /* list of TBs intersecting this ram page */
fd6ce8f6 134 TranslationBlock *first_tb;
135 /* in order to optimize self modifying code, we count the number
136 of lookups we do to a given page to use a bitmap */
137 unsigned int code_write_count;
138 uint8_t *code_bitmap;
139#if defined(CONFIG_USER_ONLY)
140 unsigned long flags;
141#endif
142} PageDesc;
143
41c1b1c9 144/* In system mode we want L1_MAP to be based on ram offsets,
145 while in user mode we want it to be based on virtual addresses. */
146#if !defined(CONFIG_USER_ONLY)
147#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
148# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 151#endif
bedb69ea 152#else
5cd2c5b6 153# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 154#endif
54936004 155
156/* Size of the L2 (and L3, etc) page tables. */
157#define L2_BITS 10
158#define L2_SIZE (1 << L2_BITS)
159
160/* The bits remaining after N lower levels of page tables. */
161#define P_L1_BITS_REM \
162 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
163#define V_L1_BITS_REM \
164 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
165
166/* Size of the L1 page table. Avoid silly small sizes. */
167#if P_L1_BITS_REM < 4
168#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
169#else
170#define P_L1_BITS P_L1_BITS_REM
171#endif
172
173#if V_L1_BITS_REM < 4
174#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
175#else
176#define V_L1_BITS V_L1_BITS_REM
177#endif
178
179#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
180#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
181
182#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
183#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
184
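/* Editorial worked example (not part of the original source): assuming
   TARGET_PHYS_ADDR_SPACE_BITS = 36, TARGET_PAGE_BITS = 12 and L2_BITS = 10,
   a physical page index has 24 bits.  P_L1_BITS_REM = 24 % 10 = 4, so the
   top level keeps P_L1_BITS = 4 bits (P_L1_SIZE = 16, P_L1_SHIFT = 20) and
   the two remaining levels consume 10 bits each, which is exactly the split
   that phys_page_find_alloc() walks further down in this file. */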
83fb7adf 185unsigned long qemu_real_host_page_size;
186unsigned long qemu_host_page_size;
187unsigned long qemu_host_page_mask;
54936004 188
189/* This is a multi-level map on the virtual address space.
190 The bottom level has pointers to PageDesc. */
191static void *l1_map[V_L1_SIZE];
54936004 192
e2eef170 193#if !defined(CONFIG_USER_ONLY)
194typedef struct PhysPageDesc {
195 /* offset in host memory of the page + io_index in the low bits */
196 ram_addr_t phys_offset;
197 ram_addr_t region_offset;
198} PhysPageDesc;
199
200/* This is a multi-level map on the physical address space.
201 The bottom level has pointers to PhysPageDesc. */
202static void *l1_phys_map[P_L1_SIZE];
6d9a1304 203
e2eef170 204static void io_mem_init(void);
62152b8a 205static void memory_map_init(void);
e2eef170 206
33417e70 207/* io memory support */
208CPUWriteMemoryFunc *_io_mem_write[IO_MEM_NB_ENTRIES][4];
209CPUReadMemoryFunc *_io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 210void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 211static char io_mem_used[IO_MEM_NB_ENTRIES];
212static int io_mem_watch;
213#endif
33417e70 214
34865134 215/* log support */
216#ifdef WIN32
217static const char *logfilename = "qemu.log";
218#else
d9b630fd 219static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 220#endif
221FILE *logfile;
222int loglevel;
e735b91c 223static int log_append = 0;
34865134 224
e3db7226 225/* statistics */
b3755a91 226#if !defined(CONFIG_USER_ONLY)
e3db7226 227static int tlb_flush_count;
b3755a91 228#endif
229static int tb_flush_count;
230static int tb_phys_invalidate_count;
231
232#ifdef _WIN32
233static void map_exec(void *addr, long size)
234{
235 DWORD old_protect;
236 VirtualProtect(addr, size,
237 PAGE_EXECUTE_READWRITE, &old_protect);
238
239}
240#else
241static void map_exec(void *addr, long size)
242{
4369415f 243 unsigned long start, end, page_size;
7cb69cae 244
4369415f 245 page_size = getpagesize();
7cb69cae 246 start = (unsigned long)addr;
4369415f 247 start &= ~(page_size - 1);
248
249 end = (unsigned long)addr + size;
250 end += page_size - 1;
251 end &= ~(page_size - 1);
252
253 mprotect((void *)start, end - start,
254 PROT_READ | PROT_WRITE | PROT_EXEC);
255}
256#endif
257
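/* Editorial sketch of the page rounding done by map_exec() above, assuming
   a 4096-byte host page (hypothetical values): for addr = 0x400010 and
   size = 0x20, start is rounded down to 0x400000 and end = 0x400030 is
   rounded up to 0x401000, so the mprotect() call covers the single page
   that contains the buffer. */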
b346ff46 258static void page_init(void)
54936004 259{
83fb7adf 260 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 261 TARGET_PAGE_SIZE */
262#ifdef _WIN32
263 {
264 SYSTEM_INFO system_info;
265
266 GetSystemInfo(&system_info);
267 qemu_real_host_page_size = system_info.dwPageSize;
268 }
269#else
270 qemu_real_host_page_size = getpagesize();
271#endif
272 if (qemu_host_page_size == 0)
273 qemu_host_page_size = qemu_real_host_page_size;
274 if (qemu_host_page_size < TARGET_PAGE_SIZE)
275 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 276 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 277
2e9a5713 278#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 279 {
280#ifdef HAVE_KINFO_GETVMMAP
281 struct kinfo_vmentry *freep;
282 int i, cnt;
283
284 freep = kinfo_getvmmap(getpid(), &cnt);
285 if (freep) {
286 mmap_lock();
287 for (i = 0; i < cnt; i++) {
288 unsigned long startaddr, endaddr;
289
290 startaddr = freep[i].kve_start;
291 endaddr = freep[i].kve_end;
292 if (h2g_valid(startaddr)) {
293 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
294
295 if (h2g_valid(endaddr)) {
296 endaddr = h2g(endaddr);
fd436907 297 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
298 } else {
299#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
300 endaddr = ~0ul;
fd436907 301 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
302#endif
303 }
304 }
305 }
306 free(freep);
307 mmap_unlock();
308 }
309#else
50a9569b 310 FILE *f;
50a9569b 311
0776590d 312 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 313
fd436907 314 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 315 if (f) {
316 mmap_lock();
317
50a9569b 318 do {
319 unsigned long startaddr, endaddr;
320 int n;
321
322 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
323
324 if (n == 2 && h2g_valid(startaddr)) {
325 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
326
327 if (h2g_valid(endaddr)) {
328 endaddr = h2g(endaddr);
329 } else {
330 endaddr = ~0ul;
331 }
332 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
333 }
334 } while (!feof(f));
5cd2c5b6 335
50a9569b 336 fclose(f);
5cd2c5b6 337 mmap_unlock();
50a9569b 338 }
f01576f1 339#endif
340 }
341#endif
342}
343
41c1b1c9 344static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 345{
346 PageDesc *pd;
347 void **lp;
348 int i;
349
5cd2c5b6 350#if defined(CONFIG_USER_ONLY)
7267c094 351 /* We can't use g_malloc because it may recurse into a locked mutex. */
352# define ALLOC(P, SIZE) \
353 do { \
354 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
355 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
356 } while (0)
357#else
358# define ALLOC(P, SIZE) \
7267c094 359 do { P = g_malloc0(SIZE); } while (0)
17e2377a 360#endif
434929bf 361
362 /* Level 1. Always allocated. */
363 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
364
365 /* Level 2..N-1. */
366 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
367 void **p = *lp;
368
369 if (p == NULL) {
370 if (!alloc) {
371 return NULL;
372 }
373 ALLOC(p, sizeof(void *) * L2_SIZE);
374 *lp = p;
17e2377a 375 }
376
377 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
378 }
379
380 pd = *lp;
381 if (pd == NULL) {
382 if (!alloc) {
383 return NULL;
384 }
385 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
386 *lp = pd;
54936004 387 }
388
389#undef ALLOC
390
391 return pd + (index & (L2_SIZE - 1));
392}
393
41c1b1c9 394static inline PageDesc *page_find(tb_page_addr_t index)
54936004 395{
5cd2c5b6 396 return page_find_alloc(index, 0);
397}
398
6d9a1304 399#if !defined(CONFIG_USER_ONLY)
c227f099 400static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 401{
e3f4e2a4 402 PhysPageDesc *pd;
403 void **lp;
404 int i;
92e873b9 405
406 /* Level 1. Always allocated. */
407 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 408
409 /* Level 2..N-1. */
410 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
411 void **p = *lp;
412 if (p == NULL) {
413 if (!alloc) {
414 return NULL;
415 }
7267c094 416 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
417 }
418 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 419 }
5cd2c5b6 420
e3f4e2a4 421 pd = *lp;
5cd2c5b6 422 if (pd == NULL) {
e3f4e2a4 423 int i;
5ab97b7f 424 int first_index = index & ~(L2_SIZE - 1);
425
426 if (!alloc) {
108c49b8 427 return NULL;
428 }
429
7267c094 430 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 431
67c4d23c 432 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6 433 pd[i].phys_offset = IO_MEM_UNASSIGNED;
5ab97b7f 434 pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
67c4d23c 435 }
92e873b9 436 }
437
438 return pd + (index & (L2_SIZE - 1));
439}
440
f1f6e3b8 441static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
92e873b9 442{
443 PhysPageDesc *p = phys_page_find_alloc(index, 0);
444
445 if (p) {
446 return *p;
447 } else {
448 return (PhysPageDesc) {
449 .phys_offset = IO_MEM_UNASSIGNED,
450 .region_offset = index << TARGET_PAGE_BITS,
451 };
452 }
453}
454
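/* Editorial note: unlike page_find(), phys_page_find() never returns NULL.
   A lookup that misses yields a by-value descriptor whose phys_offset is
   IO_MEM_UNASSIGNED and whose region_offset still encodes the page address,
   e.g. (hypothetical usage sketch):

       PhysPageDesc pd = phys_page_find(addr >> TARGET_PAGE_BITS);
       if (pd.phys_offset == IO_MEM_UNASSIGNED) {
           ... page was never registered ...
       }
*/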
455static void tlb_protect_code(ram_addr_t ram_addr);
456static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 457 target_ulong vaddr);
458#define mmap_lock() do { } while(0)
459#define mmap_unlock() do { } while(0)
9fa3e853 460#endif
fd6ce8f6 461
462#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
463
464#if defined(CONFIG_USER_ONLY)
ccbb4d44 465/* Currently it is not recommended to allocate big chunks of data in
466 user mode. It will change once a dedicated libc is used */
467#define USE_STATIC_CODE_GEN_BUFFER
468#endif
469
470#ifdef USE_STATIC_CODE_GEN_BUFFER
471static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
472 __attribute__((aligned (CODE_GEN_ALIGN)));
473#endif
474
8fcd3692 475static void code_gen_alloc(unsigned long tb_size)
26a5f13b 476{
477#ifdef USE_STATIC_CODE_GEN_BUFFER
478 code_gen_buffer = static_code_gen_buffer;
479 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
480 map_exec(code_gen_buffer, code_gen_buffer_size);
481#else
482 code_gen_buffer_size = tb_size;
483 if (code_gen_buffer_size == 0) {
4369415f 484#if defined(CONFIG_USER_ONLY)
4369415f
FB
485 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
486#else
ccbb4d44 487 /* XXX: needs adjustments */
94a6b54f 488 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 489#endif
490 }
491 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
492 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
493 /* The code gen buffer location may have constraints depending on
494 the host cpu and OS */
495#if defined(__linux__)
496 {
497 int flags;
498 void *start = NULL;
499
500 flags = MAP_PRIVATE | MAP_ANONYMOUS;
501#if defined(__x86_64__)
502 flags |= MAP_32BIT;
503 /* Cannot map more than that */
504 if (code_gen_buffer_size > (800 * 1024 * 1024))
505 code_gen_buffer_size = (800 * 1024 * 1024);
506#elif defined(__sparc_v9__)
507 // Map the buffer below 2G, so we can use direct calls and branches
508 flags |= MAP_FIXED;
509 start = (void *) 0x60000000UL;
510 if (code_gen_buffer_size > (512 * 1024 * 1024))
511 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 512#elif defined(__arm__)
222f23f5 513 /* Keep the buffer no bigger than 16MB to branch between blocks */
514 if (code_gen_buffer_size > 16 * 1024 * 1024)
515 code_gen_buffer_size = 16 * 1024 * 1024;
516#elif defined(__s390x__)
517 /* Map the buffer so that we can use direct calls and branches. */
518 /* We have a +- 4GB range on the branches; leave some slop. */
519 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
520 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
521 }
522 start = (void *)0x90000000UL;
26a5f13b 523#endif
524 code_gen_buffer = mmap(start, code_gen_buffer_size,
525 PROT_WRITE | PROT_READ | PROT_EXEC,
526 flags, -1, 0);
527 if (code_gen_buffer == MAP_FAILED) {
528 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
529 exit(1);
530 }
531 }
cbb608a5 532#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
533 || defined(__DragonFly__) || defined(__OpenBSD__) \
534 || defined(__NetBSD__)
535 {
536 int flags;
537 void *addr = NULL;
538 flags = MAP_PRIVATE | MAP_ANONYMOUS;
539#if defined(__x86_64__)
540 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
541 * 0x40000000 is free */
542 flags |= MAP_FIXED;
543 addr = (void *)0x40000000;
544 /* Cannot map more than that */
545 if (code_gen_buffer_size > (800 * 1024 * 1024))
546 code_gen_buffer_size = (800 * 1024 * 1024);
547#elif defined(__sparc_v9__)
548 // Map the buffer below 2G, so we can use direct calls and branches
549 flags |= MAP_FIXED;
550 addr = (void *) 0x60000000UL;
551 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
552 code_gen_buffer_size = (512 * 1024 * 1024);
553 }
554#endif
555 code_gen_buffer = mmap(addr, code_gen_buffer_size,
556 PROT_WRITE | PROT_READ | PROT_EXEC,
557 flags, -1, 0);
558 if (code_gen_buffer == MAP_FAILED) {
559 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
560 exit(1);
561 }
562 }
26a5f13b 563#else
7267c094 564 code_gen_buffer = g_malloc(code_gen_buffer_size);
565 map_exec(code_gen_buffer, code_gen_buffer_size);
566#endif
4369415f 567#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 568 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
569 code_gen_buffer_max_size = code_gen_buffer_size -
570 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 571 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 572 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
573}
574
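/* Editorial note on the sizing above (not in the original): the buffer
   defaults to DEFAULT_CODE_GEN_BUFFER_SIZE in user mode or ram_size / 4 in
   system mode, clamped per host architecture.  code_gen_buffer_max_size
   then reserves room for one worst-case translation (TCG_MAX_OP_SIZE *
   OPC_BUF_SIZE bytes), and code_gen_max_blocks sizes the tbs[] array, so
   tb_alloc() below can refuse an allocation before either limit is hit. */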
575/* Must be called before using the QEMU cpus. 'tb_size' is the size
576 (in bytes) allocated to the translation buffer. Zero means default
577 size. */
d5ab9713 578void tcg_exec_init(unsigned long tb_size)
26a5f13b 579{
580 cpu_gen_init();
581 code_gen_alloc(tb_size);
582 code_gen_ptr = code_gen_buffer;
4369415f 583 page_init();
584#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
585 /* There's no guest base to take into account, so go ahead and
586 initialize the prologue now. */
587 tcg_prologue_init(&tcg_ctx);
588#endif
589}
590
591bool tcg_enabled(void)
592{
593 return code_gen_buffer != NULL;
594}
595
596void cpu_exec_init_all(void)
597{
598#if !defined(CONFIG_USER_ONLY)
599 memory_map_init();
600 io_mem_init();
601#endif
602}
603
604#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
605
e59fb374 606static int cpu_common_post_load(void *opaque, int version_id)
607{
608 CPUState *env = opaque;
9656f324 609
610 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
611 version_id is increased. */
612 env->interrupt_request &= ~0x01;
613 tlb_flush(env, 1);
614
615 return 0;
616}
617
618static const VMStateDescription vmstate_cpu_common = {
619 .name = "cpu_common",
620 .version_id = 1,
621 .minimum_version_id = 1,
622 .minimum_version_id_old = 1,
623 .post_load = cpu_common_post_load,
624 .fields = (VMStateField []) {
625 VMSTATE_UINT32(halted, CPUState),
626 VMSTATE_UINT32(interrupt_request, CPUState),
627 VMSTATE_END_OF_LIST()
628 }
629};
630#endif
631
632CPUState *qemu_get_cpu(int cpu)
633{
634 CPUState *env = first_cpu;
635
636 while (env) {
637 if (env->cpu_index == cpu)
638 break;
639 env = env->next_cpu;
640 }
641
642 return env;
643}
644
6a00d601 645void cpu_exec_init(CPUState *env)
fd6ce8f6 646{
647 CPUState **penv;
648 int cpu_index;
649
650#if defined(CONFIG_USER_ONLY)
651 cpu_list_lock();
652#endif
653 env->next_cpu = NULL;
654 penv = &first_cpu;
655 cpu_index = 0;
656 while (*penv != NULL) {
1e9fa730 657 penv = &(*penv)->next_cpu;
658 cpu_index++;
659 }
660 env->cpu_index = cpu_index;
268a362c 661 env->numa_node = 0;
662 QTAILQ_INIT(&env->breakpoints);
663 QTAILQ_INIT(&env->watchpoints);
664#ifndef CONFIG_USER_ONLY
665 env->thread_id = qemu_get_thread_id();
666#endif
6a00d601 667 *penv = env;
668#if defined(CONFIG_USER_ONLY)
669 cpu_list_unlock();
670#endif
b3c7724c 671#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
672 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
673 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
674 cpu_save, cpu_load, env);
675#endif
676}
677
678/* Allocate a new translation block. Flush the translation buffer if
679 too many translation blocks or too much generated code. */
680static TranslationBlock *tb_alloc(target_ulong pc)
681{
682 TranslationBlock *tb;
683
684 if (nb_tbs >= code_gen_max_blocks ||
685 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
686 return NULL;
687 tb = &tbs[nb_tbs++];
688 tb->pc = pc;
689 tb->cflags = 0;
690 return tb;
691}
692
693void tb_free(TranslationBlock *tb)
694{
695 /* In practice this is mostly used for single-use temporary TBs.
696 Ignore the hard cases and just back up if this TB happens to
697 be the last one generated. */
698 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
699 code_gen_ptr = tb->tc_ptr;
700 nb_tbs--;
701 }
702}
703
704static inline void invalidate_page_bitmap(PageDesc *p)
705{
706 if (p->code_bitmap) {
7267c094 707 g_free(p->code_bitmap);
708 p->code_bitmap = NULL;
709 }
710 p->code_write_count = 0;
711}
712
713/* Set to NULL all the 'first_tb' fields in all PageDescs. */
714
715static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 716{
5cd2c5b6 717 int i;
fd6ce8f6 718
719 if (*lp == NULL) {
720 return;
721 }
722 if (level == 0) {
723 PageDesc *pd = *lp;
7296abac 724 for (i = 0; i < L2_SIZE; ++i) {
725 pd[i].first_tb = NULL;
726 invalidate_page_bitmap(pd + i);
fd6ce8f6 727 }
728 } else {
729 void **pp = *lp;
7296abac 730 for (i = 0; i < L2_SIZE; ++i) {
731 page_flush_tb_1 (level - 1, pp + i);
732 }
733 }
734}
735
736static void page_flush_tb(void)
737{
738 int i;
739 for (i = 0; i < V_L1_SIZE; i++) {
740 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
741 }
742}
743
744/* flush all the translation blocks */
d4e8164f 745/* XXX: tb_flush is currently not thread safe */
6a00d601 746void tb_flush(CPUState *env1)
fd6ce8f6 747{
6a00d601 748 CPUState *env;
0124311e 749#if defined(DEBUG_FLUSH)
750 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
751 (unsigned long)(code_gen_ptr - code_gen_buffer),
752 nb_tbs, nb_tbs > 0 ?
753 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 754#endif
26a5f13b 755 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
756 cpu_abort(env1, "Internal error: code buffer overflow\n");
757
fd6ce8f6 758 nb_tbs = 0;
3b46e624 759
760 for(env = first_cpu; env != NULL; env = env->next_cpu) {
761 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
762 }
9fa3e853 763
8a8a608f 764 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 765 page_flush_tb();
9fa3e853 766
fd6ce8f6 767 code_gen_ptr = code_gen_buffer;
768 /* XXX: flush processor icache at this point if cache flush is
769 expensive */
e3db7226 770 tb_flush_count++;
771}
772
773#ifdef DEBUG_TB_CHECK
774
bc98a7ef 775static void tb_invalidate_check(target_ulong address)
776{
777 TranslationBlock *tb;
778 int i;
779 address &= TARGET_PAGE_MASK;
780 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
781 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
782 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
783 address >= tb->pc + tb->size)) {
784 printf("ERROR invalidate: address=" TARGET_FMT_lx
785 " PC=%08lx size=%04x\n",
99773bd4 786 address, (long)tb->pc, tb->size);
787 }
788 }
789 }
790}
791
792/* verify that all the pages have correct rights for code */
793static void tb_page_check(void)
794{
795 TranslationBlock *tb;
796 int i, flags1, flags2;
3b46e624 797
798 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
799 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
800 flags1 = page_get_flags(tb->pc);
801 flags2 = page_get_flags(tb->pc + tb->size - 1);
802 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
803 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 804 (long)tb->pc, tb->size, flags1, flags2);
805 }
806 }
807 }
808}
809
810#endif
811
812/* invalidate one TB */
813static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
814 int next_offset)
815{
816 TranslationBlock *tb1;
817 for(;;) {
818 tb1 = *ptb;
819 if (tb1 == tb) {
820 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
821 break;
822 }
823 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
824 }
825}
826
827static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
828{
829 TranslationBlock *tb1;
830 unsigned int n1;
831
832 for(;;) {
833 tb1 = *ptb;
834 n1 = (long)tb1 & 3;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (tb1 == tb) {
837 *ptb = tb1->page_next[n1];
838 break;
839 }
840 ptb = &tb1->page_next[n1];
841 }
842}
843
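/* Editorial note: the page_next lists above store the page slot (0 or 1)
   of each TranslationBlock in the two low bits of the pointer, which is
   why the list walkers mask with ~3.  A minimal decode sketch:

       n  = (long)tb & 3;                        // which of the TB's pages
       tb = (TranslationBlock *)((long)tb & ~3); // the real pointer

   The value 2 is reserved as the list-head marker used by jmp_first (see
   tb_phys_invalidate() and tb_link_page() below). */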
844static inline void tb_jmp_remove(TranslationBlock *tb, int n)
845{
846 TranslationBlock *tb1, **ptb;
847 unsigned int n1;
848
849 ptb = &tb->jmp_next[n];
850 tb1 = *ptb;
851 if (tb1) {
852 /* find tb(n) in circular list */
853 for(;;) {
854 tb1 = *ptb;
855 n1 = (long)tb1 & 3;
856 tb1 = (TranslationBlock *)((long)tb1 & ~3);
857 if (n1 == n && tb1 == tb)
858 break;
859 if (n1 == 2) {
860 ptb = &tb1->jmp_first;
861 } else {
862 ptb = &tb1->jmp_next[n1];
863 }
864 }
865 /* now we can suppress tb(n) from the list */
866 *ptb = tb->jmp_next[n];
867
868 tb->jmp_next[n] = NULL;
869 }
870}
871
872/* reset the jump entry 'n' of a TB so that it is not chained to
873 another TB */
874static inline void tb_reset_jump(TranslationBlock *tb, int n)
875{
876 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
877}
878
41c1b1c9 879void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 880{
6a00d601 881 CPUState *env;
8a40a180 882 PageDesc *p;
d4e8164f 883 unsigned int h, n1;
41c1b1c9 884 tb_page_addr_t phys_pc;
8a40a180 885 TranslationBlock *tb1, *tb2;
3b46e624 886
887 /* remove the TB from the hash list */
888 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
889 h = tb_phys_hash_func(phys_pc);
5fafdf24 890 tb_remove(&tb_phys_hash[h], tb,
891 offsetof(TranslationBlock, phys_hash_next));
892
893 /* remove the TB from the page list */
894 if (tb->page_addr[0] != page_addr) {
895 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
896 tb_page_remove(&p->first_tb, tb);
897 invalidate_page_bitmap(p);
898 }
899 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
900 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
901 tb_page_remove(&p->first_tb, tb);
902 invalidate_page_bitmap(p);
903 }
904
36bdbe54 905 tb_invalidated_flag = 1;
59817ccb 906
fd6ce8f6 907 /* remove the TB from the hash list */
8a40a180 908 h = tb_jmp_cache_hash_func(tb->pc);
909 for(env = first_cpu; env != NULL; env = env->next_cpu) {
910 if (env->tb_jmp_cache[h] == tb)
911 env->tb_jmp_cache[h] = NULL;
912 }
913
914 /* suppress this TB from the two jump lists */
915 tb_jmp_remove(tb, 0);
916 tb_jmp_remove(tb, 1);
917
918 /* suppress any remaining jumps to this TB */
919 tb1 = tb->jmp_first;
920 for(;;) {
921 n1 = (long)tb1 & 3;
922 if (n1 == 2)
923 break;
924 tb1 = (TranslationBlock *)((long)tb1 & ~3);
925 tb2 = tb1->jmp_next[n1];
926 tb_reset_jump(tb1, n1);
927 tb1->jmp_next[n1] = NULL;
928 tb1 = tb2;
929 }
930 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 931
e3db7226 932 tb_phys_invalidate_count++;
933}
934
935static inline void set_bits(uint8_t *tab, int start, int len)
936{
937 int end, mask, end1;
938
939 end = start + len;
940 tab += start >> 3;
941 mask = 0xff << (start & 7);
942 if ((start & ~7) == (end & ~7)) {
943 if (start < end) {
944 mask &= ~(0xff << (end & 7));
945 *tab |= mask;
946 }
947 } else {
948 *tab++ |= mask;
949 start = (start + 8) & ~7;
950 end1 = end & ~7;
951 while (start < end1) {
952 *tab++ = 0xff;
953 start += 8;
954 }
955 if (start < end) {
956 mask = ~(0xff << (end & 7));
957 *tab |= mask;
958 }
959 }
960}
961
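/* Editorial worked example for set_bits() (not in the original): with
   start = 10 and len = 6, end is 16, tab is advanced to byte 1 and 0xfc is
   ORed in, i.e. bits 10..15 of the bitmap are marked.  build_page_bitmap()
   below uses this to record which byte offsets of a page are covered by
   translated code. */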
962static void build_page_bitmap(PageDesc *p)
963{
964 int n, tb_start, tb_end;
965 TranslationBlock *tb;
3b46e624 966
7267c094 967 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
968
969 tb = p->first_tb;
970 while (tb != NULL) {
971 n = (long)tb & 3;
972 tb = (TranslationBlock *)((long)tb & ~3);
973 /* NOTE: this is subtle as a TB may span two physical pages */
974 if (n == 0) {
975 /* NOTE: tb_end may be after the end of the page, but
976 it is not a problem */
977 tb_start = tb->pc & ~TARGET_PAGE_MASK;
978 tb_end = tb_start + tb->size;
979 if (tb_end > TARGET_PAGE_SIZE)
980 tb_end = TARGET_PAGE_SIZE;
981 } else {
982 tb_start = 0;
983 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
984 }
985 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
986 tb = tb->page_next[n];
987 }
988}
989
990TranslationBlock *tb_gen_code(CPUState *env,
991 target_ulong pc, target_ulong cs_base,
992 int flags, int cflags)
993{
994 TranslationBlock *tb;
995 uint8_t *tc_ptr;
996 tb_page_addr_t phys_pc, phys_page2;
997 target_ulong virt_page2;
998 int code_gen_size;
999
41c1b1c9 1000 phys_pc = get_page_addr_code(env, pc);
c27004ec 1001 tb = tb_alloc(pc);
1002 if (!tb) {
1003 /* flush must be done */
1004 tb_flush(env);
1005 /* cannot fail at this point */
c27004ec 1006 tb = tb_alloc(pc);
1007 /* Don't forget to invalidate previous TB info. */
1008 tb_invalidated_flag = 1;
1009 }
1010 tc_ptr = code_gen_ptr;
1011 tb->tc_ptr = tc_ptr;
1012 tb->cs_base = cs_base;
1013 tb->flags = flags;
1014 tb->cflags = cflags;
d07bde88 1015 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1016 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1017
d720b93d 1018 /* check next page if needed */
c27004ec 1019 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1020 phys_page2 = -1;
c27004ec 1021 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1022 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1023 }
41c1b1c9 1024 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1025 return tb;
d720b93d 1026}
3b46e624 1027
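/* Editorial note: when the generated code crosses a guest page boundary,
   virt_page2 differs from pc's page, phys_page2 is resolved as well and
   tb_link_page() inserts the TB into the lists of both physical pages;
   otherwise phys_page2 stays -1 and only page_addr[0] is used. */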
1028/* invalidate all TBs which intersect with the target physical page
1029 starting in range [start;end[. NOTE: start and end must refer to
1030 the same physical page. 'is_cpu_write_access' should be true if called
1031 from a real cpu write access: the virtual CPU will exit the current
1032 TB if code is modified inside this TB. */
41c1b1c9 1033void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
1034 int is_cpu_write_access)
1035{
6b917547 1036 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1037 CPUState *env = cpu_single_env;
41c1b1c9 1038 tb_page_addr_t tb_start, tb_end;
1039 PageDesc *p;
1040 int n;
1041#ifdef TARGET_HAS_PRECISE_SMC
1042 int current_tb_not_found = is_cpu_write_access;
1043 TranslationBlock *current_tb = NULL;
1044 int current_tb_modified = 0;
1045 target_ulong current_pc = 0;
1046 target_ulong current_cs_base = 0;
1047 int current_flags = 0;
1048#endif /* TARGET_HAS_PRECISE_SMC */
1049
1050 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1051 if (!p)
9fa3e853 1052 return;
5fafdf24 1053 if (!p->code_bitmap &&
1054 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1055 is_cpu_write_access) {
1056 /* build code bitmap */
1057 build_page_bitmap(p);
1058 }
1059
1060 /* we remove all the TBs in the range [start, end[ */
1061 /* XXX: see if in some cases it could be faster to invalidate all the code */
1062 tb = p->first_tb;
1063 while (tb != NULL) {
1064 n = (long)tb & 3;
1065 tb = (TranslationBlock *)((long)tb & ~3);
1066 tb_next = tb->page_next[n];
1067 /* NOTE: this is subtle as a TB may span two physical pages */
1068 if (n == 0) {
1069 /* NOTE: tb_end may be after the end of the page, but
1070 it is not a problem */
1071 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1072 tb_end = tb_start + tb->size;
1073 } else {
1074 tb_start = tb->page_addr[1];
1075 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1076 }
1077 if (!(tb_end <= start || tb_start >= end)) {
1078#ifdef TARGET_HAS_PRECISE_SMC
1079 if (current_tb_not_found) {
1080 current_tb_not_found = 0;
1081 current_tb = NULL;
2e70f6ef 1082 if (env->mem_io_pc) {
d720b93d 1083 /* now we have a real cpu fault */
2e70f6ef 1084 current_tb = tb_find_pc(env->mem_io_pc);
1085 }
1086 }
1087 if (current_tb == tb &&
2e70f6ef 1088 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1089 /* If we are modifying the current TB, we must stop
1090 its execution. We could be more precise by checking
1091 that the modification is after the current PC, but it
1092 would require a specialized function to partially
1093 restore the CPU state */
3b46e624 1094
d720b93d 1095 current_tb_modified = 1;
618ba8e6 1096 cpu_restore_state(current_tb, env, env->mem_io_pc);
1097 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1098 &current_flags);
1099 }
1100#endif /* TARGET_HAS_PRECISE_SMC */
1101 /* we need to do that to handle the case where a signal
1102 occurs while doing tb_phys_invalidate() */
1103 saved_tb = NULL;
1104 if (env) {
1105 saved_tb = env->current_tb;
1106 env->current_tb = NULL;
1107 }
9fa3e853 1108 tb_phys_invalidate(tb, -1);
1109 if (env) {
1110 env->current_tb = saved_tb;
1111 if (env->interrupt_request && env->current_tb)
1112 cpu_interrupt(env, env->interrupt_request);
1113 }
1114 }
1115 tb = tb_next;
1116 }
1117#if !defined(CONFIG_USER_ONLY)
1118 /* if no code remaining, no need to continue to use slow writes */
1119 if (!p->first_tb) {
1120 invalidate_page_bitmap(p);
d720b93d 1121 if (is_cpu_write_access) {
2e70f6ef 1122 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1123 }
1124 }
1125#endif
1126#ifdef TARGET_HAS_PRECISE_SMC
1127 if (current_tb_modified) {
1128 /* we generate a block containing just the instruction
1129 modifying the memory. It will ensure that it cannot modify
1130 itself */
ea1c1802 1131 env->current_tb = NULL;
2e70f6ef 1132 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1133 cpu_resume_from_signal(env, NULL);
9fa3e853 1134 }
fd6ce8f6 1135#endif
9fa3e853 1136}
fd6ce8f6 1137
9fa3e853 1138/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1139static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1140{
1141 PageDesc *p;
1142 int offset, b;
59817ccb 1143#if 0
a4193c8a 1144 if (1) {
1145 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1146 cpu_single_env->mem_io_vaddr, len,
1147 cpu_single_env->eip,
1148 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1149 }
1150#endif
9fa3e853 1151 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1152 if (!p)
1153 return;
1154 if (p->code_bitmap) {
1155 offset = start & ~TARGET_PAGE_MASK;
1156 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1157 if (b & ((1 << len) - 1))
1158 goto do_invalidate;
1159 } else {
1160 do_invalidate:
d720b93d 1161 tb_invalidate_phys_page_range(start, start + len, 1);
1162 }
1163}
1164
9fa3e853 1165#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1166static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1167 unsigned long pc, void *puc)
9fa3e853 1168{
6b917547 1169 TranslationBlock *tb;
9fa3e853 1170 PageDesc *p;
6b917547 1171 int n;
d720b93d 1172#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1173 TranslationBlock *current_tb = NULL;
d720b93d 1174 CPUState *env = cpu_single_env;
1175 int current_tb_modified = 0;
1176 target_ulong current_pc = 0;
1177 target_ulong current_cs_base = 0;
1178 int current_flags = 0;
d720b93d 1179#endif
1180
1181 addr &= TARGET_PAGE_MASK;
1182 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1183 if (!p)
1184 return;
1185 tb = p->first_tb;
1186#ifdef TARGET_HAS_PRECISE_SMC
1187 if (tb && pc != 0) {
1188 current_tb = tb_find_pc(pc);
1189 }
1190#endif
1191 while (tb != NULL) {
1192 n = (long)tb & 3;
1193 tb = (TranslationBlock *)((long)tb & ~3);
1194#ifdef TARGET_HAS_PRECISE_SMC
1195 if (current_tb == tb &&
2e70f6ef 1196 (current_tb->cflags & CF_COUNT_MASK) != 1) {
1197 /* If we are modifying the current TB, we must stop
1198 its execution. We could be more precise by checking
1199 that the modification is after the current PC, but it
1200 would require a specialized function to partially
1201 restore the CPU state */
3b46e624 1202
d720b93d 1203 current_tb_modified = 1;
618ba8e6 1204 cpu_restore_state(current_tb, env, pc);
1205 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1206 &current_flags);
1207 }
1208#endif /* TARGET_HAS_PRECISE_SMC */
1209 tb_phys_invalidate(tb, addr);
1210 tb = tb->page_next[n];
1211 }
fd6ce8f6 1212 p->first_tb = NULL;
1213#ifdef TARGET_HAS_PRECISE_SMC
1214 if (current_tb_modified) {
1215 /* we generate a block containing just the instruction
1216 modifying the memory. It will ensure that it cannot modify
1217 itself */
ea1c1802 1218 env->current_tb = NULL;
2e70f6ef 1219 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1220 cpu_resume_from_signal(env, puc);
1221 }
1222#endif
fd6ce8f6 1223}
9fa3e853 1224#endif
1225
1226/* add the tb in the target page and protect it if necessary */
5fafdf24 1227static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1228 unsigned int n, tb_page_addr_t page_addr)
1229{
1230 PageDesc *p;
1231#ifndef CONFIG_USER_ONLY
1232 bool page_already_protected;
1233#endif
1234
1235 tb->page_addr[n] = page_addr;
5cd2c5b6 1236 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1237 tb->page_next[n] = p->first_tb;
1238#ifndef CONFIG_USER_ONLY
1239 page_already_protected = p->first_tb != NULL;
1240#endif
1241 p->first_tb = (TranslationBlock *)((long)tb | n);
1242 invalidate_page_bitmap(p);
fd6ce8f6 1243
107db443 1244#if defined(TARGET_HAS_SMC) || 1
d720b93d 1245
9fa3e853 1246#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1247 if (p->flags & PAGE_WRITE) {
1248 target_ulong addr;
1249 PageDesc *p2;
1250 int prot;
1251
1252 /* force the host page as non writable (writes will have a
1253 page fault + mprotect overhead) */
53a5960a 1254 page_addr &= qemu_host_page_mask;
fd6ce8f6 1255 prot = 0;
1256 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1257 addr += TARGET_PAGE_SIZE) {
1258
1259 p2 = page_find (addr >> TARGET_PAGE_BITS);
1260 if (!p2)
1261 continue;
1262 prot |= p2->flags;
1263 p2->flags &= ~PAGE_WRITE;
53a5960a 1264 }
5fafdf24 1265 mprotect(g2h(page_addr), qemu_host_page_size,
1266 (prot & PAGE_BITS) & ~PAGE_WRITE);
1267#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1268 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1269 page_addr);
fd6ce8f6 1270#endif
fd6ce8f6 1271 }
1272#else
1273 /* if some code is already present, then the pages are already
1274 protected. So we handle the case where only the first TB is
1275 allocated in a physical page */
4429ab44 1276 if (!page_already_protected) {
6a00d601 1277 tlb_protect_code(page_addr);
1278 }
1279#endif
1280
1281#endif /* TARGET_HAS_SMC */
1282}
1283
1284/* add a new TB and link it to the physical page tables. phys_page2 is
1285 (-1) to indicate that only one page contains the TB. */
1286void tb_link_page(TranslationBlock *tb,
1287 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1288{
1289 unsigned int h;
1290 TranslationBlock **ptb;
1291
1292 /* Grab the mmap lock to stop another thread invalidating this TB
1293 before we are done. */
1294 mmap_lock();
1295 /* add in the physical hash table */
1296 h = tb_phys_hash_func(phys_pc);
1297 ptb = &tb_phys_hash[h];
1298 tb->phys_hash_next = *ptb;
1299 *ptb = tb;
1300
1301 /* add in the page list */
1302 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1303 if (phys_page2 != -1)
1304 tb_alloc_page(tb, 1, phys_page2);
1305 else
1306 tb->page_addr[1] = -1;
9fa3e853 1307
1308 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1309 tb->jmp_next[0] = NULL;
1310 tb->jmp_next[1] = NULL;
1311
1312 /* init original jump addresses */
1313 if (tb->tb_next_offset[0] != 0xffff)
1314 tb_reset_jump(tb, 0);
1315 if (tb->tb_next_offset[1] != 0xffff)
1316 tb_reset_jump(tb, 1);
1317
1318#ifdef DEBUG_TB_CHECK
1319 tb_page_check();
1320#endif
c8a706fe 1321 mmap_unlock();
1322}
1323
1324/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1325 tb[1].tc_ptr. Return NULL if not found */
1326TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1327{
1328 int m_min, m_max, m;
1329 unsigned long v;
1330 TranslationBlock *tb;
1331
1332 if (nb_tbs <= 0)
1333 return NULL;
1334 if (tc_ptr < (unsigned long)code_gen_buffer ||
1335 tc_ptr >= (unsigned long)code_gen_ptr)
1336 return NULL;
1337 /* binary search (cf Knuth) */
1338 m_min = 0;
1339 m_max = nb_tbs - 1;
1340 while (m_min <= m_max) {
1341 m = (m_min + m_max) >> 1;
1342 tb = &tbs[m];
1343 v = (unsigned long)tb->tc_ptr;
1344 if (v == tc_ptr)
1345 return tb;
1346 else if (tc_ptr < v) {
1347 m_max = m - 1;
1348 } else {
1349 m_min = m + 1;
1350 }
5fafdf24 1351 }
1352 return &tbs[m_max];
1353}
7501267e 1354
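/* Editorial note: the binary search in tb_find_pc() above is valid because
   tb_alloc() fills tbs[] sequentially while code_gen_ptr only grows, so the
   tc_ptr values are stored in ascending order until the next tb_flush().
   Typical use is mapping a host PC inside the code buffer back to its
   TranslationBlock, e.g. tb_find_pc((unsigned long)host_pc). */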
1355static void tb_reset_jump_recursive(TranslationBlock *tb);
1356
1357static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1358{
1359 TranslationBlock *tb1, *tb_next, **ptb;
1360 unsigned int n1;
1361
1362 tb1 = tb->jmp_next[n];
1363 if (tb1 != NULL) {
1364 /* find head of list */
1365 for(;;) {
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == 2)
1369 break;
1370 tb1 = tb1->jmp_next[n1];
1371 }
1372 /* we are now sure that tb jumps to tb1 */
1373 tb_next = tb1;
1374
1375 /* remove tb from the jmp_first list */
1376 ptb = &tb_next->jmp_first;
1377 for(;;) {
1378 tb1 = *ptb;
1379 n1 = (long)tb1 & 3;
1380 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1381 if (n1 == n && tb1 == tb)
1382 break;
1383 ptb = &tb1->jmp_next[n1];
1384 }
1385 *ptb = tb->jmp_next[n];
1386 tb->jmp_next[n] = NULL;
3b46e624 1387
1388 /* suppress the jump to next tb in generated code */
1389 tb_reset_jump(tb, n);
1390
0124311e 1391 /* suppress jumps in the tb on which we could have jumped */
1392 tb_reset_jump_recursive(tb_next);
1393 }
1394}
1395
1396static void tb_reset_jump_recursive(TranslationBlock *tb)
1397{
1398 tb_reset_jump_recursive2(tb, 0);
1399 tb_reset_jump_recursive2(tb, 1);
1400}
1401
1fddef4b 1402#if defined(TARGET_HAS_ICE)
1403#if defined(CONFIG_USER_ONLY)
1404static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1405{
1406 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1407}
1408#else
1409static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1410{
c227f099 1411 target_phys_addr_t addr;
9b3c35e0 1412 target_ulong pd;
c227f099 1413 ram_addr_t ram_addr;
f1f6e3b8 1414 PhysPageDesc p;
d720b93d 1415
1416 addr = cpu_get_phys_page_debug(env, pc);
1417 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 1418 pd = p.phys_offset;
c2f07f81 1419 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1420 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1421}
c27004ec 1422#endif
94df27fd 1423#endif /* TARGET_HAS_ICE */
d720b93d 1424
1425#if defined(CONFIG_USER_ONLY)
1426void cpu_watchpoint_remove_all(CPUState *env, int mask)
1427
1428{
1429}
1430
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
1433{
1434 return -ENOSYS;
1435}
1436#else
6658ffb8 1437/* Add a watchpoint. */
1438int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1439 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1440{
b4051334 1441 target_ulong len_mask = ~(len - 1);
c0ce998e 1442 CPUWatchpoint *wp;
6658ffb8 1443
1444 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1445 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1446 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1447 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1448 return -EINVAL;
1449 }
7267c094 1450 wp = g_malloc(sizeof(*wp));
1451
1452 wp->vaddr = addr;
b4051334 1453 wp->len_mask = len_mask;
1454 wp->flags = flags;
1455
2dc9f411 1456 /* keep all GDB-injected watchpoints in front */
c0ce998e 1457 if (flags & BP_GDB)
72cf2d4f 1458 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1459 else
72cf2d4f 1460 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1461
6658ffb8 1462 tlb_flush_page(env, addr);
1463
1464 if (watchpoint)
1465 *watchpoint = wp;
1466 return 0;
1467}
1468
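/* Editorial example for the len_mask check above (not in the original):
   len = 4 gives len_mask = ~3, so a watchpoint at addr = 0x1004 is accepted
   while addr = 0x1006 fails the (addr & ~len_mask) test and the function
   returns -EINVAL; watchpoints must be naturally aligned power-of-two
   ranges of 1, 2, 4 or 8 bytes. */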
1469/* Remove a specific watchpoint. */
1470int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1471 int flags)
6658ffb8 1472{
b4051334 1473 target_ulong len_mask = ~(len - 1);
a1d1bb31 1474 CPUWatchpoint *wp;
6658ffb8 1475
72cf2d4f 1476 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1477 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1478 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1479 cpu_watchpoint_remove_by_ref(env, wp);
1480 return 0;
1481 }
1482 }
a1d1bb31 1483 return -ENOENT;
1484}
1485
1486/* Remove a specific watchpoint by reference. */
1487void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1488{
72cf2d4f 1489 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1490
1491 tlb_flush_page(env, watchpoint->vaddr);
1492
7267c094 1493 g_free(watchpoint);
1494}
1495
1496/* Remove all matching watchpoints. */
1497void cpu_watchpoint_remove_all(CPUState *env, int mask)
1498{
c0ce998e 1499 CPUWatchpoint *wp, *next;
a1d1bb31 1500
72cf2d4f 1501 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1502 if (wp->flags & mask)
1503 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1504 }
7d03f82f 1505}
c527ee8f 1506#endif
7d03f82f 1507
1508/* Add a breakpoint. */
1509int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1510 CPUBreakpoint **breakpoint)
4c3a88a2 1511{
1fddef4b 1512#if defined(TARGET_HAS_ICE)
c0ce998e 1513 CPUBreakpoint *bp;
3b46e624 1514
7267c094 1515 bp = g_malloc(sizeof(*bp));
4c3a88a2 1516
1517 bp->pc = pc;
1518 bp->flags = flags;
1519
2dc9f411 1520 /* keep all GDB-injected breakpoints in front */
c0ce998e 1521 if (flags & BP_GDB)
72cf2d4f 1522 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1523 else
72cf2d4f 1524 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1525
d720b93d 1526 breakpoint_invalidate(env, pc);
1527
1528 if (breakpoint)
1529 *breakpoint = bp;
4c3a88a2
FB
1530 return 0;
1531#else
a1d1bb31 1532 return -ENOSYS;
1533#endif
1534}
1535
1536/* Remove a specific breakpoint. */
1537int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1538{
7d03f82f 1539#if defined(TARGET_HAS_ICE)
1540 CPUBreakpoint *bp;
1541
72cf2d4f 1542 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1543 if (bp->pc == pc && bp->flags == flags) {
1544 cpu_breakpoint_remove_by_ref(env, bp);
1545 return 0;
1546 }
7d03f82f 1547 }
1548 return -ENOENT;
1549#else
1550 return -ENOSYS;
1551#endif
1552}
1553
1554/* Remove a specific breakpoint by reference. */
1555void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1556{
1fddef4b 1557#if defined(TARGET_HAS_ICE)
72cf2d4f 1558 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1559
1560 breakpoint_invalidate(env, breakpoint->pc);
1561
7267c094 1562 g_free(breakpoint);
1563#endif
1564}
1565
1566/* Remove all matching breakpoints. */
1567void cpu_breakpoint_remove_all(CPUState *env, int mask)
1568{
1569#if defined(TARGET_HAS_ICE)
c0ce998e 1570 CPUBreakpoint *bp, *next;
a1d1bb31 1571
72cf2d4f 1572 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1573 if (bp->flags & mask)
1574 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1575 }
1576#endif
1577}
1578
1579/* enable or disable single step mode. EXCP_DEBUG is returned by the
1580 CPU loop after each instruction */
1581void cpu_single_step(CPUState *env, int enabled)
1582{
1fddef4b 1583#if defined(TARGET_HAS_ICE)
1584 if (env->singlestep_enabled != enabled) {
1585 env->singlestep_enabled = enabled;
1586 if (kvm_enabled())
1587 kvm_update_guest_debug(env, 0);
1588 else {
ccbb4d44 1589 /* must flush all the translated code to avoid inconsistencies */
1590 /* XXX: only flush what is necessary */
1591 tb_flush(env);
1592 }
1593 }
1594#endif
1595}
1596
1597/* enable or disable low levels log */
1598void cpu_set_log(int log_flags)
1599{
1600 loglevel = log_flags;
1601 if (loglevel && !logfile) {
11fcfab4 1602 logfile = fopen(logfilename, log_append ? "a" : "w");
1603 if (!logfile) {
1604 perror(logfilename);
1605 _exit(1);
1606 }
1607#if !defined(CONFIG_SOFTMMU)
1608 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1609 {
b55266b5 1610 static char logfile_buf[4096];
1611 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1612 }
1613#elif defined(_WIN32)
1614 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1615 setvbuf(logfile, NULL, _IONBF, 0);
1616#else
34865134 1617 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1618#endif
1619 log_append = 1;
1620 }
1621 if (!loglevel && logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
1624 }
1625}
1626
1627void cpu_set_log_filename(const char *filename)
1628{
1629 logfilename = strdup(filename);
1630 if (logfile) {
1631 fclose(logfile);
1632 logfile = NULL;
1633 }
1634 cpu_set_log(loglevel);
34865134 1635}
c33a346e 1636
3098dba0 1637static void cpu_unlink_tb(CPUState *env)
ea041c0e 1638{
1639 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1640 problem and hope the cpu will stop of its own accord. For userspace
1641 emulation this often isn't actually as bad as it sounds. Often
1642 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1643 TranslationBlock *tb;
c227f099 1644 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1645
cab1b4bd 1646 spin_lock(&interrupt_lock);
1647 tb = env->current_tb;
1648 /* if the cpu is currently executing code, we must unlink it and
1649 all the potentially executing TB */
f76cfe56 1650 if (tb) {
1651 env->current_tb = NULL;
1652 tb_reset_jump_recursive(tb);
be214e6c 1653 }
cab1b4bd 1654 spin_unlock(&interrupt_lock);
1655}
1656
97ffbd8d 1657#ifndef CONFIG_USER_ONLY
3098dba0 1658/* mask must never be zero, except for A20 change call */
ec6959d0 1659static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1660{
1661 int old_mask;
be214e6c 1662
2e70f6ef 1663 old_mask = env->interrupt_request;
68a79315 1664 env->interrupt_request |= mask;
3098dba0 1665
1666 /*
1667 * If called from iothread context, wake the target cpu in
1668 * case it's halted.
1669 */
b7680cb6 1670 if (!qemu_cpu_is_self(env)) {
1671 qemu_cpu_kick(env);
1672 return;
1673 }
8edac960 1674
2e70f6ef 1675 if (use_icount) {
266910c4 1676 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1677 if (!can_do_io(env)
be214e6c 1678 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1679 cpu_abort(env, "Raised interrupt while not in I/O function");
1680 }
2e70f6ef 1681 } else {
3098dba0 1682 cpu_unlink_tb(env);
ea041c0e
FB
1683 }
1684}
1685
ec6959d0
JK
1686CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1687
97ffbd8d
JK
1688#else /* CONFIG_USER_ONLY */
1689
1690void cpu_interrupt(CPUState *env, int mask)
1691{
1692 env->interrupt_request |= mask;
1693 cpu_unlink_tb(env);
1694}
1695#endif /* CONFIG_USER_ONLY */
1696
1697void cpu_reset_interrupt(CPUState *env, int mask)
1698{
1699 env->interrupt_request &= ~mask;
1700}
1701
1702void cpu_exit(CPUState *env)
1703{
1704 env->exit_request = 1;
1705 cpu_unlink_tb(env);
1706}
1707
c7cd6a37 1708const CPULogItem cpu_log_items[] = {
5fafdf24 1709 { CPU_LOG_TB_OUT_ASM, "out_asm",
1710 "show generated host assembly code for each compiled TB" },
1711 { CPU_LOG_TB_IN_ASM, "in_asm",
1712 "show target assembly code for each compiled TB" },
5fafdf24 1713 { CPU_LOG_TB_OP, "op",
57fec1fe 1714 "show micro ops for each compiled TB" },
f193c797 1715 { CPU_LOG_TB_OP_OPT, "op_opt",
1716 "show micro ops "
1717#ifdef TARGET_I386
1718 "before eflags optimization and "
f193c797 1719#endif
e01a1157 1720 "after liveness analysis" },
1721 { CPU_LOG_INT, "int",
1722 "show interrupts/exceptions in short format" },
1723 { CPU_LOG_EXEC, "exec",
1724 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1725 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1726 "show CPU state before block translation" },
1727#ifdef TARGET_I386
1728 { CPU_LOG_PCALL, "pcall",
1729 "show protected mode far calls/returns/exceptions" },
1730 { CPU_LOG_RESET, "cpu_reset",
1731 "show CPU state before CPU resets" },
f193c797 1732#endif
8e3a9fd2 1733#ifdef DEBUG_IOPORT
1734 { CPU_LOG_IOPORT, "ioport",
1735 "show all i/o ports accesses" },
8e3a9fd2 1736#endif
1737 { 0, NULL, NULL },
1738};
1739
1740static int cmp1(const char *s1, int n, const char *s2)
1741{
1742 if (strlen(s2) != n)
1743 return 0;
1744 return memcmp(s1, s2, n) == 0;
1745}
3b46e624 1746
1747/* takes a comma separated list of log masks. Return 0 if error. */
1748int cpu_str_to_log_mask(const char *str)
1749{
c7cd6a37 1750 const CPULogItem *item;
1751 int mask;
1752 const char *p, *p1;
1753
1754 p = str;
1755 mask = 0;
1756 for(;;) {
1757 p1 = strchr(p, ',');
1758 if (!p1)
1759 p1 = p + strlen(p);
1760 if(cmp1(p,p1-p,"all")) {
1761 for(item = cpu_log_items; item->mask != 0; item++) {
1762 mask |= item->mask;
1763 }
1764 } else {
1765 for(item = cpu_log_items; item->mask != 0; item++) {
1766 if (cmp1(p, p1 - p, item->name))
1767 goto found;
1768 }
1769 return 0;
f193c797 1770 }
f193c797
FB
1771 found:
1772 mask |= item->mask;
1773 if (*p1 != ',')
1774 break;
1775 p = p1 + 1;
1776 }
1777 return mask;
1778}
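/* Worked example (illustrative, not part of the original source): with the
   table above, cpu_str_to_log_mask("in_asm,int") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_INT, and "all" ORs together every mask in
   cpu_log_items.  Any unrecognized name makes the whole call return 0,
   which callers treat as a parse error. */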
ea041c0e 1779
7501267e
FB
1780void cpu_abort(CPUState *env, const char *fmt, ...)
1781{
1782 va_list ap;
493ae1f0 1783 va_list ap2;
7501267e
FB
1784
1785 va_start(ap, fmt);
493ae1f0 1786 va_copy(ap2, ap);
7501267e
FB
1787 fprintf(stderr, "qemu: fatal: ");
1788 vfprintf(stderr, fmt, ap);
1789 fprintf(stderr, "\n");
1790#ifdef TARGET_I386
7fe48483
FB
1791 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1792#else
1793 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1794#endif
93fcfe39
AL
1795 if (qemu_log_enabled()) {
1796 qemu_log("qemu: fatal: ");
1797 qemu_log_vprintf(fmt, ap2);
1798 qemu_log("\n");
f9373291 1799#ifdef TARGET_I386
93fcfe39 1800 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1801#else
93fcfe39 1802 log_cpu_state(env, 0);
f9373291 1803#endif
31b1a7b4 1804 qemu_log_flush();
93fcfe39 1805 qemu_log_close();
924edcae 1806 }
493ae1f0 1807 va_end(ap2);
f9373291 1808 va_end(ap);
fd052bf6
RV
1809#if defined(CONFIG_USER_ONLY)
1810 {
1811 struct sigaction act;
1812 sigfillset(&act.sa_mask);
1813 act.sa_handler = SIG_DFL;
1814 sigaction(SIGABRT, &act, NULL);
1815 }
1816#endif
7501267e
FB
1817 abort();
1818}
1819
c5be9f08
TS
1820CPUState *cpu_copy(CPUState *env)
1821{
01ba9816 1822 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1823 CPUState *next_cpu = new_env->next_cpu;
1824 int cpu_index = new_env->cpu_index;
5a38f081
AL
1825#if defined(TARGET_HAS_ICE)
1826 CPUBreakpoint *bp;
1827 CPUWatchpoint *wp;
1828#endif
1829
c5be9f08 1830 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1831
1832 /* Preserve chaining and index. */
c5be9f08
TS
1833 new_env->next_cpu = next_cpu;
1834 new_env->cpu_index = cpu_index;
5a38f081
AL
1835
1836 /* Clone all break/watchpoints.
1837 Note: Once we support ptrace with hw-debug register access, make sure
1838 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1839 QTAILQ_INIT(&env->breakpoints);
1840 QTAILQ_INIT(&env->watchpoints);
5a38f081 1841#if defined(TARGET_HAS_ICE)
72cf2d4f 1842 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1843 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1844 }
72cf2d4f 1845 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1846 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1847 wp->flags, NULL);
1848 }
1849#endif
1850
c5be9f08
TS
1851 return new_env;
1852}
1853
0124311e
FB
1854#if !defined(CONFIG_USER_ONLY)
1855
5c751e99
EI
1856static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1857{
1858 unsigned int i;
1859
1860 /* Discard jump cache entries for any tb which might potentially
1861 overlap the flushed page. */
1862 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1863 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1864 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1865
1866 i = tb_jmp_cache_hash_page(addr);
1867 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1868 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1869}
1870
08738984
IK
1871static CPUTLBEntry s_cputlb_empty_entry = {
1872 .addr_read = -1,
1873 .addr_write = -1,
1874 .addr_code = -1,
1875 .addend = -1,
1876};
1877
ee8b7021
FB
1878/* NOTE: if flush_global is true, also flush global entries (not
1879 implemented yet) */
1880void tlb_flush(CPUState *env, int flush_global)
33417e70 1881{
33417e70 1882 int i;
0124311e 1883
9fa3e853
FB
1884#if defined(DEBUG_TLB)
1885 printf("tlb_flush:\n");
1886#endif
0124311e
FB
1887 /* must reset current TB so that interrupts cannot modify the
1888 links while we are modifying them */
1889 env->current_tb = NULL;
1890
33417e70 1891 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1892 int mmu_idx;
1893 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1894 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1895 }
33417e70 1896 }
9fa3e853 1897
8a40a180 1898 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1899
d4c430a8
PB
1900 env->tlb_flush_addr = -1;
1901 env->tlb_flush_mask = 0;
e3db7226 1902 tlb_flush_count++;
33417e70
FB
1903}
1904
274da6b2 1905static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1906{
5fafdf24 1907 if (addr == (tlb_entry->addr_read &
84b7b8e7 1908 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1909 addr == (tlb_entry->addr_write &
84b7b8e7 1910 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1911 addr == (tlb_entry->addr_code &
84b7b8e7 1912 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1913 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1914 }
61382a50
FB
1915}
1916
2e12669a 1917void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1918{
8a40a180 1919 int i;
cfde4bd9 1920 int mmu_idx;
0124311e 1921
9fa3e853 1922#if defined(DEBUG_TLB)
108c49b8 1923 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1924#endif
d4c430a8
PB
1925 /* Check if we need to flush due to large pages. */
1926 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1927#if defined(DEBUG_TLB)
1928 printf("tlb_flush_page: forced full flush ("
1929 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1930 env->tlb_flush_addr, env->tlb_flush_mask);
1931#endif
1932 tlb_flush(env, 1);
1933 return;
1934 }
0124311e
FB
1935 /* must reset current TB so that interrupts cannot modify the
1936 links while we are modifying them */
1937 env->current_tb = NULL;
61382a50
FB
1938
1939 addr &= TARGET_PAGE_MASK;
1940 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1941 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1942 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1943
5c751e99 1944 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1945}
1946
9fa3e853
FB
1947/* update the TLBs so that writes to code in the virtual page 'addr'
1948 can be detected */
c227f099 1949static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1950{
5fafdf24 1951 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1952 ram_addr + TARGET_PAGE_SIZE,
1953 CODE_DIRTY_FLAG);
9fa3e853
FB
1954}
1955
9fa3e853 1956/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1957 tested for self-modifying code */
c227f099 1958static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1959 target_ulong vaddr)
9fa3e853 1960{
f7c11b53 1961 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
1962}
1963
5fafdf24 1964static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1965 unsigned long start, unsigned long length)
1966{
1967 unsigned long addr;
84b7b8e7
FB
1968 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1969 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1970 if ((addr - start) < length) {
0f459d16 1971 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1972 }
1973 }
1974}
1975
5579c7f3 1976/* Note: start and end must be within the same ram block. */
c227f099 1977void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1978 int dirty_flags)
1ccde1cb
FB
1979{
1980 CPUState *env;
4f2ac237 1981 unsigned long length, start1;
f7c11b53 1982 int i;
1ccde1cb
FB
1983
1984 start &= TARGET_PAGE_MASK;
1985 end = TARGET_PAGE_ALIGN(end);
1986
1987 length = end - start;
1988 if (length == 0)
1989 return;
f7c11b53 1990 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 1991
1ccde1cb
FB
1992 /* we modify the TLB cache so that the dirty bit will be set again
1993 when accessing the range */
b2e0a138 1994 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 1995 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 1996 address comparisons below. */
b2e0a138 1997 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
1998 != (end - 1) - start) {
1999 abort();
2000 }
2001
6a00d601 2002 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2003 int mmu_idx;
2004 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2005 for(i = 0; i < CPU_TLB_SIZE; i++)
2006 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2007 start1, length);
2008 }
6a00d601 2009 }
1ccde1cb
FB
2010}
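/* Editorial summary of the dirty-tracking round trip: the function above
   clears the dirty flags and tags matching TLB write entries with
   TLB_NOTDIRTY, so the next guest store to such a page is forced through
   the notdirty_mem_write* handlers later in this file; those handlers set
   the dirty flags again and, once all flags including CODE_DIRTY_FLAG are
   set, tlb_set_dirty() restores the direct RAM mapping for later writes. */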
2011
74576198
AL
2012int cpu_physical_memory_set_dirty_tracking(int enable)
2013{
f6f3fbca 2014 int ret = 0;
74576198 2015 in_migration = enable;
f6f3fbca 2016 return ret;
74576198
AL
2017}
2018
3a7d929e
FB
2019static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2020{
c227f099 2021 ram_addr_t ram_addr;
5579c7f3 2022 void *p;
3a7d929e 2023
84b7b8e7 2024 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2025 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2026 + tlb_entry->addend);
e890261f 2027 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2028 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2029 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2030 }
2031 }
2032}
2033
2034/* update the TLB according to the current state of the dirty bits */
2035void cpu_tlb_update_dirty(CPUState *env)
2036{
2037 int i;
cfde4bd9
IY
2038 int mmu_idx;
2039 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2040 for(i = 0; i < CPU_TLB_SIZE; i++)
2041 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2042 }
3a7d929e
FB
2043}
2044
0f459d16 2045static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2046{
0f459d16
PB
2047 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2048 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2049}
2050
0f459d16
PB
2051/* update the TLB corresponding to virtual page vaddr
2052 so that it is no longer dirty */
2053static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2054{
1ccde1cb 2055 int i;
cfde4bd9 2056 int mmu_idx;
1ccde1cb 2057
0f459d16 2058 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2059 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2060 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2061 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2062}
2063
d4c430a8
PB
2064/* Our TLB does not support large pages, so remember the area covered by
2065 large pages and trigger a full TLB flush if these are invalidated. */
2066static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2067 target_ulong size)
2068{
2069 target_ulong mask = ~(size - 1);
2070
2071 if (env->tlb_flush_addr == (target_ulong)-1) {
2072 env->tlb_flush_addr = vaddr & mask;
2073 env->tlb_flush_mask = mask;
2074 return;
2075 }
2076 /* Extend the existing region to include the new page.
2077 This is a compromise between unnecessary flushes and the cost
2078 of maintaining a full variable size TLB. */
2079 mask &= env->tlb_flush_mask;
2080 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2081 mask <<= 1;
2082 }
2083 env->tlb_flush_addr &= mask;
2084 env->tlb_flush_mask = mask;
2085}
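/* Worked example (illustrative, assuming a 32-bit target_ulong and 2 MiB
   large pages): with an existing region tlb_flush_addr = 0x40000000,
   tlb_flush_mask = 0xffe00000, adding a page at vaddr = 0x40400000 starts
   from mask = 0xffe00000 and widens it until both addresses match:
       0xffe00000 -> 0xffc00000 -> 0xff800000
   leaving tlb_flush_addr = 0x40000000 and tlb_flush_mask = 0xff800000,
   i.e. a single 8 MiB region whose invalidation forces a full flush. */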
2086
1d393fa2
AK
2087static bool is_ram_rom(ram_addr_t pd)
2088{
2089 pd &= ~TARGET_PAGE_MASK;
2090 return pd == IO_MEM_RAM || pd == IO_MEM_ROM;
2091}
2092
2093static bool is_ram_rom_romd(ram_addr_t pd)
2094{
2095 return is_ram_rom(pd) || (pd & IO_MEM_ROMD);
2096}
2097
d4c430a8
PB
2098/* Add a new TLB entry. At most one entry for a given virtual address
2099 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2100 supplied size is only used by tlb_flush_page. */
2101void tlb_set_page(CPUState *env, target_ulong vaddr,
2102 target_phys_addr_t paddr, int prot,
2103 int mmu_idx, target_ulong size)
9fa3e853 2104{
f1f6e3b8 2105 PhysPageDesc p;
4f2ac237 2106 unsigned long pd;
9fa3e853 2107 unsigned int index;
4f2ac237 2108 target_ulong address;
0f459d16 2109 target_ulong code_address;
355b1943 2110 unsigned long addend;
84b7b8e7 2111 CPUTLBEntry *te;
a1d1bb31 2112 CPUWatchpoint *wp;
c227f099 2113 target_phys_addr_t iotlb;
9fa3e853 2114
d4c430a8
PB
2115 assert(size >= TARGET_PAGE_SIZE);
2116 if (size != TARGET_PAGE_SIZE) {
2117 tlb_add_large_page(env, vaddr, size);
2118 }
92e873b9 2119 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
f1f6e3b8 2120 pd = p.phys_offset;
9fa3e853 2121#if defined(DEBUG_TLB)
7fd3f494
SW
2122 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2123 " prot=%x idx=%d pd=0x%08lx\n",
2124 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2125#endif
2126
0f459d16 2127 address = vaddr;
1d393fa2 2128 if (!is_ram_rom_romd(pd)) {
0f459d16
PB
2129 /* IO memory case (romd handled later) */
2130 address |= TLB_MMIO;
2131 }
5579c7f3 2132 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1d393fa2 2133 if (is_ram_rom(pd)) {
0f459d16
PB
2134 /* Normal RAM. */
2135 iotlb = pd & TARGET_PAGE_MASK;
2136 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2137 iotlb |= IO_MEM_NOTDIRTY;
2138 else
2139 iotlb |= IO_MEM_ROM;
2140 } else {
ccbb4d44 2141 /* IO handlers are currently passed a physical address.
0f459d16
PB
2142 It would be nice to pass an offset from the base address
2143 of that region. This would avoid having to special case RAM,
2144 and avoid full address decoding in every device.
2145 We can't use the high bits of pd for this because
2146 IO_MEM_ROMD uses these as a ram address. */
8da3ff18 2147 iotlb = (pd & ~TARGET_PAGE_MASK);
f1f6e3b8 2148 iotlb += p.region_offset;
0f459d16
PB
2149 }
2150
2151 code_address = address;
2152 /* Make accesses to pages with watchpoints go via the
2153 watchpoint trap routines. */
72cf2d4f 2154 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2155 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2156 /* Avoid trapping reads of pages with a write breakpoint. */
2157 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2158 iotlb = io_mem_watch + paddr;
2159 address |= TLB_MMIO;
2160 break;
2161 }
6658ffb8 2162 }
0f459d16 2163 }
d79acba4 2164
0f459d16
PB
2165 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2166 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2167 te = &env->tlb_table[mmu_idx][index];
2168 te->addend = addend - vaddr;
2169 if (prot & PAGE_READ) {
2170 te->addr_read = address;
2171 } else {
2172 te->addr_read = -1;
2173 }
5c751e99 2174
0f459d16
PB
2175 if (prot & PAGE_EXEC) {
2176 te->addr_code = code_address;
2177 } else {
2178 te->addr_code = -1;
2179 }
2180 if (prot & PAGE_WRITE) {
2181 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2182 (pd & IO_MEM_ROMD)) {
2183 /* Write access calls the I/O callback. */
2184 te->addr_write = address | TLB_MMIO;
2185 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2186 !cpu_physical_memory_is_dirty(pd)) {
2187 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2188 } else {
0f459d16 2189 te->addr_write = address;
9fa3e853 2190 }
0f459d16
PB
2191 } else {
2192 te->addr_write = -1;
9fa3e853 2193 }
9fa3e853
FB
2194}
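/* Editorial summary of the entry just built: for plain RAM the per-entry
   addend lets the fast path compute the host pointer as vaddr + addend,
   while env->iotlb[] caches (iotlb - vaddr), where iotlb is either the ram
   address (tagged IO_MEM_NOTDIRTY or IO_MEM_ROM when writes must be
   intercepted) or the io-mem index plus region_offset for MMIO.  The
   TLB_MMIO / TLB_NOTDIRTY bits deliberately make the softmmu fast-path
   address comparison fail (that comparison lives in the softmmu templates,
   not in this file), so such accesses take the out-of-line handlers. */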
2195
0124311e
FB
2196#else
2197
ee8b7021 2198void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2199{
2200}
2201
2e12669a 2202void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2203{
2204}
2205
edf8e2af
MW
2206/*
2207 * Walks guest process memory "regions" one by one
2208 * and calls callback function 'fn' for each region.
2209 */
5cd2c5b6
RH
2210
2211struct walk_memory_regions_data
2212{
2213 walk_memory_regions_fn fn;
2214 void *priv;
2215 unsigned long start;
2216 int prot;
2217};
2218
2219static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2220 abi_ulong end, int new_prot)
5cd2c5b6
RH
2221{
2222 if (data->start != -1ul) {
2223 int rc = data->fn(data->priv, data->start, end, data->prot);
2224 if (rc != 0) {
2225 return rc;
2226 }
2227 }
2228
2229 data->start = (new_prot ? end : -1ul);
2230 data->prot = new_prot;
2231
2232 return 0;
2233}
2234
2235static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2236 abi_ulong base, int level, void **lp)
5cd2c5b6 2237{
b480d9b7 2238 abi_ulong pa;
5cd2c5b6
RH
2239 int i, rc;
2240
2241 if (*lp == NULL) {
2242 return walk_memory_regions_end(data, base, 0);
2243 }
2244
2245 if (level == 0) {
2246 PageDesc *pd = *lp;
7296abac 2247 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2248 int prot = pd[i].flags;
2249
2250 pa = base | (i << TARGET_PAGE_BITS);
2251 if (prot != data->prot) {
2252 rc = walk_memory_regions_end(data, pa, prot);
2253 if (rc != 0) {
2254 return rc;
9fa3e853 2255 }
9fa3e853 2256 }
5cd2c5b6
RH
2257 }
2258 } else {
2259 void **pp = *lp;
7296abac 2260 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2261 pa = base | ((abi_ulong)i <<
2262 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2263 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2264 if (rc != 0) {
2265 return rc;
2266 }
2267 }
2268 }
2269
2270 return 0;
2271}
2272
2273int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2274{
2275 struct walk_memory_regions_data data;
2276 unsigned long i;
2277
2278 data.fn = fn;
2279 data.priv = priv;
2280 data.start = -1ul;
2281 data.prot = 0;
2282
2283 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2284 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2285 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2286 if (rc != 0) {
2287 return rc;
9fa3e853 2288 }
33417e70 2289 }
5cd2c5b6
RH
2290
2291 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2292}
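/* Minimal usage sketch (illustrative, not from the original source): a
   callback with the same signature as dump_region() below that sums the
   bytes of all executable mappings.  A non-zero return value stops the
   walk and is propagated back to the caller. */
#if 0 /* example only */
static int count_exec_bytes(void *priv, abi_ulong start, abi_ulong end,
                            unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(abi_ulong *)priv += end - start;
    }
    return 0;
}
/* abi_ulong total = 0; walk_memory_regions(&total, count_exec_bytes); */
#endif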
2293
b480d9b7
PB
2294static int dump_region(void *priv, abi_ulong start,
2295 abi_ulong end, unsigned long prot)
edf8e2af
MW
2296{
2297 FILE *f = (FILE *)priv;
2298
b480d9b7
PB
2299 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2300 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2301 start, end, end - start,
2302 ((prot & PAGE_READ) ? 'r' : '-'),
2303 ((prot & PAGE_WRITE) ? 'w' : '-'),
2304 ((prot & PAGE_EXEC) ? 'x' : '-'));
2305
2306 return (0);
2307}
2308
2309/* dump memory mappings */
2310void page_dump(FILE *f)
2311{
2312 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2313 "start", "end", "size", "prot");
2314 walk_memory_regions(f, dump_region);
33417e70
FB
2315}
2316
53a5960a 2317int page_get_flags(target_ulong address)
33417e70 2318{
9fa3e853
FB
2319 PageDesc *p;
2320
2321 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2322 if (!p)
9fa3e853
FB
2323 return 0;
2324 return p->flags;
2325}
2326
376a7909
RH
2327/* Modify the flags of a page and invalidate the code if necessary.
2328 The flag PAGE_WRITE_ORG is positioned automatically depending
2329 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2330void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2331{
376a7909
RH
2332 target_ulong addr, len;
2333
2334 /* This function should never be called with addresses outside the
2335 guest address space. If this assert fires, it probably indicates
2336 a missing call to h2g_valid. */
b480d9b7
PB
2337#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2338 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2339#endif
2340 assert(start < end);
9fa3e853
FB
2341
2342 start = start & TARGET_PAGE_MASK;
2343 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2344
2345 if (flags & PAGE_WRITE) {
9fa3e853 2346 flags |= PAGE_WRITE_ORG;
376a7909
RH
2347 }
2348
2349 for (addr = start, len = end - start;
2350 len != 0;
2351 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2352 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2353
2354 /* If the write protection bit is set, then we invalidate
2355 the code inside. */
5fafdf24 2356 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2357 (flags & PAGE_WRITE) &&
2358 p->first_tb) {
d720b93d 2359 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2360 }
2361 p->flags = flags;
2362 }
33417e70
FB
2363}
2364
3d97b40b
TS
2365int page_check_range(target_ulong start, target_ulong len, int flags)
2366{
2367 PageDesc *p;
2368 target_ulong end;
2369 target_ulong addr;
2370
376a7909
RH
2371 /* This function should never be called with addresses outside the
2372 guest address space. If this assert fires, it probably indicates
2373 a missing call to h2g_valid. */
338e9e6c
BS
2374#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2375 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2376#endif
2377
3e0650a9
RH
2378 if (len == 0) {
2379 return 0;
2380 }
376a7909
RH
2381 if (start + len - 1 < start) {
2382 /* We've wrapped around. */
55f280c9 2383 return -1;
376a7909 2384 }
55f280c9 2385
3d97b40b
TS
2386 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2387 start = start & TARGET_PAGE_MASK;
2388
376a7909
RH
2389 for (addr = start, len = end - start;
2390 len != 0;
2391 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2392 p = page_find(addr >> TARGET_PAGE_BITS);
2393 if( !p )
2394 return -1;
2395 if( !(p->flags & PAGE_VALID) )
2396 return -1;
2397
dae3270c 2398 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2399 return -1;
dae3270c
FB
2400 if (flags & PAGE_WRITE) {
2401 if (!(p->flags & PAGE_WRITE_ORG))
2402 return -1;
2403 /* unprotect the page if it was put read-only because it
2404 contains translated code */
2405 if (!(p->flags & PAGE_WRITE)) {
2406 if (!page_unprotect(addr, 0, NULL))
2407 return -1;
2408 }
2409 return 0;
2410 }
3d97b40b
TS
2411 }
2412 return 0;
2413}
2414
9fa3e853 2415/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2416 page. Return TRUE if the fault was successfully handled. */
53a5960a 2417int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2418{
45d679d6
AJ
2419 unsigned int prot;
2420 PageDesc *p;
53a5960a 2421 target_ulong host_start, host_end, addr;
9fa3e853 2422
c8a706fe
PB
2423 /* Technically this isn't safe inside a signal handler. However we
2424 know this only ever happens in a synchronous SEGV handler, so in
2425 practice it seems to be ok. */
2426 mmap_lock();
2427
45d679d6
AJ
2428 p = page_find(address >> TARGET_PAGE_BITS);
2429 if (!p) {
c8a706fe 2430 mmap_unlock();
9fa3e853 2431 return 0;
c8a706fe 2432 }
45d679d6 2433
9fa3e853
FB
2434 /* if the page was really writable, then we change its
2435 protection back to writable */
45d679d6
AJ
2436 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2437 host_start = address & qemu_host_page_mask;
2438 host_end = host_start + qemu_host_page_size;
2439
2440 prot = 0;
2441 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2442 p = page_find(addr >> TARGET_PAGE_BITS);
2443 p->flags |= PAGE_WRITE;
2444 prot |= p->flags;
2445
9fa3e853
FB
2446 /* and since the content will be modified, we must invalidate
2447 the corresponding translated code. */
45d679d6 2448 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2449#ifdef DEBUG_TB_CHECK
45d679d6 2450 tb_invalidate_check(addr);
9fa3e853 2451#endif
9fa3e853 2452 }
45d679d6
AJ
2453 mprotect((void *)g2h(host_start), qemu_host_page_size,
2454 prot & PAGE_BITS);
2455
2456 mmap_unlock();
2457 return 1;
9fa3e853 2458 }
c8a706fe 2459 mmap_unlock();
9fa3e853
FB
2460 return 0;
2461}
2462
6a00d601
FB
2463static inline void tlb_set_dirty(CPUState *env,
2464 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2465{
2466}
9fa3e853
FB
2467#endif /* defined(CONFIG_USER_ONLY) */
2468
e2eef170 2469#if !defined(CONFIG_USER_ONLY)
8da3ff18 2470
c04b2b78
PB
2471#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2472typedef struct subpage_t {
2473 target_phys_addr_t base;
f6405247
RH
2474 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2475 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2476} subpage_t;
2477
c227f099
AL
2478static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2479 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2480static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2481 ram_addr_t orig_memory,
2482 ram_addr_t region_offset);
db7b5426
BS
2483#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2484 need_subpage) \
2485 do { \
2486 if (addr > start_addr) \
2487 start_addr2 = 0; \
2488 else { \
2489 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2490 if (start_addr2 > 0) \
2491 need_subpage = 1; \
2492 } \
2493 \
49e9fba2 2494 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2495 end_addr2 = TARGET_PAGE_SIZE - 1; \
2496 else { \
2497 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2498 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2499 need_subpage = 1; \
2500 } \
2501 } while (0)
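/* Worked example (illustrative, assuming 4 KiB target pages): registering
   an 0x800-byte MMIO region at start_addr 0x3000 reaches this macro with
   addr == start_addr, so start_addr2 = 0, while the region ends inside the
   page: end_addr2 = 0x7ff and need_subpage = 1.  The caller below then maps
   only offsets 0x000-0x7ff of that page through a subpage and leaves the
   rest of the page on its previous mapping. */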
2502
8f2498f9
MT
2503/* register physical memory.
2504 For RAM, 'size' must be a multiple of the target page size.
2505 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2506 io memory page. The address used when calling the IO function is
2507 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2508 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2509 before calculating this offset. This should not be a problem unless
2510 the low bits of start_addr and region_offset differ. */
0fd542fb 2511void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2512 ram_addr_t size,
2513 ram_addr_t phys_offset,
0fd542fb
MT
2514 ram_addr_t region_offset,
2515 bool log_dirty)
33417e70 2516{
c227f099 2517 target_phys_addr_t addr, end_addr;
92e873b9 2518 PhysPageDesc *p;
9d42037b 2519 CPUState *env;
c227f099 2520 ram_addr_t orig_size = size;
f6405247 2521 subpage_t *subpage;
33417e70 2522
3b8e6a2d 2523 assert(size);
f6f3fbca 2524
67c4d23c
PB
2525 if (phys_offset == IO_MEM_UNASSIGNED) {
2526 region_offset = start_addr;
2527 }
8da3ff18 2528 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2529 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2530 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2531
2532 addr = start_addr;
2533 do {
f1f6e3b8 2534 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
db7b5426 2535 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2536 ram_addr_t orig_memory = p->phys_offset;
2537 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2538 int need_subpage = 0;
2539
2540 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2541 need_subpage);
f6405247 2542 if (need_subpage) {
db7b5426
BS
2543 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2544 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2545 &p->phys_offset, orig_memory,
2546 p->region_offset);
db7b5426
BS
2547 } else {
2548 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2549 >> IO_MEM_SHIFT];
2550 }
8da3ff18
PB
2551 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2552 region_offset);
2553 p->region_offset = 0;
db7b5426
BS
2554 } else {
2555 p->phys_offset = phys_offset;
2774c6d0 2556 p->region_offset = region_offset;
1d393fa2 2557 if (is_ram_rom_romd(phys_offset))
db7b5426
BS
2558 phys_offset += TARGET_PAGE_SIZE;
2559 }
2560 } else {
2561 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2562 p->phys_offset = phys_offset;
8da3ff18 2563 p->region_offset = region_offset;
1d393fa2 2564 if (is_ram_rom_romd(phys_offset)) {
db7b5426 2565 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2566 } else {
c227f099 2567 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2568 int need_subpage = 0;
2569
2570 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2571 end_addr2, need_subpage);
2572
f6405247 2573 if (need_subpage) {
db7b5426 2574 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2575 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2576 addr & TARGET_PAGE_MASK);
db7b5426 2577 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2578 phys_offset, region_offset);
2579 p->region_offset = 0;
db7b5426
BS
2580 }
2581 }
2582 }
8da3ff18 2583 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2584 addr += TARGET_PAGE_SIZE;
2585 } while (addr != end_addr);
3b46e624 2586
9d42037b
FB
2587 /* since each CPU stores ram addresses in its TLB cache, we must
2588 reset the modified entries */
2589 /* XXX: slow ! */
2590 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2591 tlb_flush(env, 1);
2592 }
33417e70
FB
2593}
2594
c227f099 2595void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2596{
2597 if (kvm_enabled())
2598 kvm_coalesce_mmio_region(addr, size);
2599}
2600
c227f099 2601void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2602{
2603 if (kvm_enabled())
2604 kvm_uncoalesce_mmio_region(addr, size);
2605}
2606
62a2744c
SY
2607void qemu_flush_coalesced_mmio_buffer(void)
2608{
2609 if (kvm_enabled())
2610 kvm_flush_coalesced_mmio_buffer();
2611}
2612
c902760f
MT
2613#if defined(__linux__) && !defined(TARGET_S390X)
2614
2615#include <sys/vfs.h>
2616
2617#define HUGETLBFS_MAGIC 0x958458f6
2618
2619static long gethugepagesize(const char *path)
2620{
2621 struct statfs fs;
2622 int ret;
2623
2624 do {
9742bf26 2625 ret = statfs(path, &fs);
c902760f
MT
2626 } while (ret != 0 && errno == EINTR);
2627
2628 if (ret != 0) {
9742bf26
YT
2629 perror(path);
2630 return 0;
c902760f
MT
2631 }
2632
2633 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2634 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2635
2636 return fs.f_bsize;
2637}
2638
04b16653
AW
2639static void *file_ram_alloc(RAMBlock *block,
2640 ram_addr_t memory,
2641 const char *path)
c902760f
MT
2642{
2643 char *filename;
2644 void *area;
2645 int fd;
2646#ifdef MAP_POPULATE
2647 int flags;
2648#endif
2649 unsigned long hpagesize;
2650
2651 hpagesize = gethugepagesize(path);
2652 if (!hpagesize) {
9742bf26 2653 return NULL;
c902760f
MT
2654 }
2655
2656 if (memory < hpagesize) {
2657 return NULL;
2658 }
2659
2660 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2661 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2662 return NULL;
2663 }
2664
2665 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2666 return NULL;
c902760f
MT
2667 }
2668
2669 fd = mkstemp(filename);
2670 if (fd < 0) {
9742bf26
YT
2671 perror("unable to create backing store for hugepages");
2672 free(filename);
2673 return NULL;
c902760f
MT
2674 }
2675 unlink(filename);
2676 free(filename);
2677
2678 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2679
2680 /*
2681 * ftruncate is not supported by hugetlbfs in older
2682 * hosts, so don't bother bailing out on errors.
2683 * If anything goes wrong with it under other filesystems,
2684 * mmap will fail.
2685 */
2686 if (ftruncate(fd, memory))
9742bf26 2687 perror("ftruncate");
c902760f
MT
2688
2689#ifdef MAP_POPULATE
2690 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2691 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2692 * to sidestep this quirk.
2693 */
2694 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2695 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2696#else
2697 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2698#endif
2699 if (area == MAP_FAILED) {
9742bf26
YT
2700 perror("file_ram_alloc: can't mmap RAM pages");
2701 close(fd);
2702 return (NULL);
c902760f 2703 }
04b16653 2704 block->fd = fd;
c902760f
MT
2705 return area;
2706}
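/* Worked example (illustrative): with "-mem-path /dev/hugepages" on a host
   using 2 MiB huge pages, a 127 MiB RAM request is rounded up to 128 MiB by
   the (memory+hpagesize-1) & ~(hpagesize-1) step and backed by an unlinked
   temporary file on hugetlbfs.  If the path is not a hugetlbfs mount,
   gethugepagesize() only warns and returns the filesystem block size. */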
2707#endif
2708
d17b5288 2709static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2710{
2711 RAMBlock *block, *next_block;
3e837b2c 2712 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2713
2714 if (QLIST_EMPTY(&ram_list.blocks))
2715 return 0;
2716
2717 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2718 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2719
2720 end = block->offset + block->length;
2721
2722 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2723 if (next_block->offset >= end) {
2724 next = MIN(next, next_block->offset);
2725 }
2726 }
2727 if (next - end >= size && next - end < mingap) {
3e837b2c 2728 offset = end;
04b16653
AW
2729 mingap = next - end;
2730 }
2731 }
3e837b2c
AW
2732
2733 if (offset == RAM_ADDR_MAX) {
2734 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2735 (uint64_t)size);
2736 abort();
2737 }
2738
04b16653
AW
2739 return offset;
2740}
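/* Editorial note on the search above: for each existing block the loop
   measures the gap between that block's end and the closest block that
   starts after it, and keeps the smallest gap that still fits 'size'
   (a best-fit scan).  E.g. with blocks at [0, 128M) and [256M, 512M), a
   64M request lands at offset 128M because the 128M..256M hole is the
   tightest one that fits. */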
2741
2742static ram_addr_t last_ram_offset(void)
d17b5288
AW
2743{
2744 RAMBlock *block;
2745 ram_addr_t last = 0;
2746
2747 QLIST_FOREACH(block, &ram_list.blocks, next)
2748 last = MAX(last, block->offset + block->length);
2749
2750 return last;
2751}
2752
c5705a77 2753void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
84b89d78
CM
2754{
2755 RAMBlock *new_block, *block;
2756
c5705a77
AK
2757 new_block = NULL;
2758 QLIST_FOREACH(block, &ram_list.blocks, next) {
2759 if (block->offset == addr) {
2760 new_block = block;
2761 break;
2762 }
2763 }
2764 assert(new_block);
2765 assert(!new_block->idstr[0]);
84b89d78
CM
2766
2767 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2768 char *id = dev->parent_bus->info->get_dev_path(dev);
2769 if (id) {
2770 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2771 g_free(id);
84b89d78
CM
2772 }
2773 }
2774 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2775
2776 QLIST_FOREACH(block, &ram_list.blocks, next) {
c5705a77 2777 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
84b89d78
CM
2778 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2779 new_block->idstr);
2780 abort();
2781 }
2782 }
c5705a77
AK
2783}
2784
2785ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2786 MemoryRegion *mr)
2787{
2788 RAMBlock *new_block;
2789
2790 size = TARGET_PAGE_ALIGN(size);
2791 new_block = g_malloc0(sizeof(*new_block));
84b89d78 2792
7c637366 2793 new_block->mr = mr;
432d268c 2794 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2795 if (host) {
2796 new_block->host = host;
cd19cfa2 2797 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2798 } else {
2799 if (mem_path) {
c902760f 2800#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2801 new_block->host = file_ram_alloc(new_block, size, mem_path);
2802 if (!new_block->host) {
2803 new_block->host = qemu_vmalloc(size);
e78815a5 2804 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2805 }
c902760f 2806#else
6977dfe6
YT
2807 fprintf(stderr, "-mem-path option unsupported\n");
2808 exit(1);
c902760f 2809#endif
6977dfe6 2810 } else {
6b02494d 2811#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2812 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2813 a system-defined value, which is at least 256GB. Larger systems
2814 have larger values. We put the guest between the end of data
2815 segment (system break) and this value. We use 32GB as a base to
2816 have enough room for the system break to grow. */
2817 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2818 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2819 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2820 if (new_block->host == MAP_FAILED) {
2821 fprintf(stderr, "Allocating RAM failed\n");
2822 abort();
2823 }
6b02494d 2824#else
868bb33f 2825 if (xen_enabled()) {
fce537d4 2826 xen_ram_alloc(new_block->offset, size, mr);
432d268c
JN
2827 } else {
2828 new_block->host = qemu_vmalloc(size);
2829 }
6b02494d 2830#endif
e78815a5 2831 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2832 }
c902760f 2833 }
94a6b54f
PB
2834 new_block->length = size;
2835
f471a17e 2836 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2837
7267c094 2838 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2839 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2840 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2841 0xff, size >> TARGET_PAGE_BITS);
2842
6f0437e8
JK
2843 if (kvm_enabled())
2844 kvm_setup_guest_memory(new_block->host, size);
2845
94a6b54f
PB
2846 return new_block->offset;
2847}
e9a1ab19 2848
c5705a77 2849ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
6977dfe6 2850{
c5705a77 2851 return qemu_ram_alloc_from_ptr(size, NULL, mr);
6977dfe6
YT
2852}
2853
1f2e98b6
AW
2854void qemu_ram_free_from_ptr(ram_addr_t addr)
2855{
2856 RAMBlock *block;
2857
2858 QLIST_FOREACH(block, &ram_list.blocks, next) {
2859 if (addr == block->offset) {
2860 QLIST_REMOVE(block, next);
7267c094 2861 g_free(block);
1f2e98b6
AW
2862 return;
2863 }
2864 }
2865}
2866
c227f099 2867void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2868{
04b16653
AW
2869 RAMBlock *block;
2870
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
2872 if (addr == block->offset) {
2873 QLIST_REMOVE(block, next);
cd19cfa2
HY
2874 if (block->flags & RAM_PREALLOC_MASK) {
2875 ;
2876 } else if (mem_path) {
04b16653
AW
2877#if defined (__linux__) && !defined(TARGET_S390X)
2878 if (block->fd) {
2879 munmap(block->host, block->length);
2880 close(block->fd);
2881 } else {
2882 qemu_vfree(block->host);
2883 }
fd28aa13
JK
2884#else
2885 abort();
04b16653
AW
2886#endif
2887 } else {
2888#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2889 munmap(block->host, block->length);
2890#else
868bb33f 2891 if (xen_enabled()) {
e41d7c69 2892 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
2893 } else {
2894 qemu_vfree(block->host);
2895 }
04b16653
AW
2896#endif
2897 }
7267c094 2898 g_free(block);
04b16653
AW
2899 return;
2900 }
2901 }
2902
e9a1ab19
FB
2903}
2904
cd19cfa2
HY
2905#ifndef _WIN32
2906void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2907{
2908 RAMBlock *block;
2909 ram_addr_t offset;
2910 int flags;
2911 void *area, *vaddr;
2912
2913 QLIST_FOREACH(block, &ram_list.blocks, next) {
2914 offset = addr - block->offset;
2915 if (offset < block->length) {
2916 vaddr = block->host + offset;
2917 if (block->flags & RAM_PREALLOC_MASK) {
2918 ;
2919 } else {
2920 flags = MAP_FIXED;
2921 munmap(vaddr, length);
2922 if (mem_path) {
2923#if defined(__linux__) && !defined(TARGET_S390X)
2924 if (block->fd) {
2925#ifdef MAP_POPULATE
2926 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2927 MAP_PRIVATE;
2928#else
2929 flags |= MAP_PRIVATE;
2930#endif
2931 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2932 flags, block->fd, offset);
2933 } else {
2934 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2935 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2936 flags, -1, 0);
2937 }
fd28aa13
JK
2938#else
2939 abort();
cd19cfa2
HY
2940#endif
2941 } else {
2942#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2943 flags |= MAP_SHARED | MAP_ANONYMOUS;
2944 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2945 flags, -1, 0);
2946#else
2947 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2948 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2949 flags, -1, 0);
2950#endif
2951 }
2952 if (area != vaddr) {
f15fbc4b
AP
2953 fprintf(stderr, "Could not remap addr: "
2954 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
2955 length, addr);
2956 exit(1);
2957 }
2958 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2959 }
2960 return;
2961 }
2962 }
2963}
2964#endif /* !_WIN32 */
2965
dc828ca1 2966/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2967 With the exception of the softmmu code in this file, this should
2968 only be used for local memory (e.g. video ram) that the device owns,
2969 and knows it isn't going to access beyond the end of the block.
2970
2971 It should not be used for general purpose DMA.
2972 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2973 */
c227f099 2974void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2975{
94a6b54f
PB
2976 RAMBlock *block;
2977
f471a17e
AW
2978 QLIST_FOREACH(block, &ram_list.blocks, next) {
2979 if (addr - block->offset < block->length) {
7d82af38
VP
2980 /* Move this entry to the start of the list. */
2981 if (block != QLIST_FIRST(&ram_list.blocks)) {
2982 QLIST_REMOVE(block, next);
2983 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2984 }
868bb33f 2985 if (xen_enabled()) {
432d268c
JN
2986 /* We need to check if the requested address is in the RAM
2987 * because we don't want to map the entire memory in QEMU.
712c2b41 2988 * In that case just map until the end of the page.
432d268c
JN
2989 */
2990 if (block->offset == 0) {
e41d7c69 2991 return xen_map_cache(addr, 0, 0);
432d268c 2992 } else if (block->host == NULL) {
e41d7c69
JK
2993 block->host =
2994 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
2995 }
2996 }
f471a17e
AW
2997 return block->host + (addr - block->offset);
2998 }
94a6b54f 2999 }
f471a17e
AW
3000
3001 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3002 abort();
3003
3004 return NULL;
dc828ca1
PB
3005}
3006
b2e0a138
MT
3007/* Return a host pointer to ram allocated with qemu_ram_alloc.
3008 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3009 */
3010void *qemu_safe_ram_ptr(ram_addr_t addr)
3011{
3012 RAMBlock *block;
3013
3014 QLIST_FOREACH(block, &ram_list.blocks, next) {
3015 if (addr - block->offset < block->length) {
868bb33f 3016 if (xen_enabled()) {
432d268c
JN
3017 /* We need to check if the requested address is in the RAM
3018 * because we don't want to map the entire memory in QEMU.
712c2b41 3019 * In that case just map until the end of the page.
432d268c
JN
3020 */
3021 if (block->offset == 0) {
e41d7c69 3022 return xen_map_cache(addr, 0, 0);
432d268c 3023 } else if (block->host == NULL) {
e41d7c69
JK
3024 block->host =
3025 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3026 }
3027 }
b2e0a138
MT
3028 return block->host + (addr - block->offset);
3029 }
3030 }
3031
3032 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3033 abort();
3034
3035 return NULL;
3036}
3037
38bee5dc
SS
3038/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3039 * but takes a size argument */
8ab934f9 3040void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3041{
8ab934f9
SS
3042 if (*size == 0) {
3043 return NULL;
3044 }
868bb33f 3045 if (xen_enabled()) {
e41d7c69 3046 return xen_map_cache(addr, *size, 1);
868bb33f 3047 } else {
38bee5dc
SS
3048 RAMBlock *block;
3049
3050 QLIST_FOREACH(block, &ram_list.blocks, next) {
3051 if (addr - block->offset < block->length) {
3052 if (addr - block->offset + *size > block->length)
3053 *size = block->length - addr + block->offset;
3054 return block->host + (addr - block->offset);
3055 }
3056 }
3057
3058 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3059 abort();
38bee5dc
SS
3060 }
3061}
3062
050a0ddf
AP
3063void qemu_put_ram_ptr(void *addr)
3064{
3065 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3066}
3067
e890261f 3068int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3069{
94a6b54f
PB
3070 RAMBlock *block;
3071 uint8_t *host = ptr;
3072
868bb33f 3073 if (xen_enabled()) {
e41d7c69 3074 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3075 return 0;
3076 }
3077
f471a17e 3078 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3079 /* This case happens when the block is not mapped. */
3080 if (block->host == NULL) {
3081 continue;
3082 }
f471a17e 3083 if (host - block->host < block->length) {
e890261f
MT
3084 *ram_addr = block->offset + (host - block->host);
3085 return 0;
f471a17e 3086 }
94a6b54f 3087 }
432d268c 3088
e890261f
MT
3089 return -1;
3090}
f471a17e 3091
e890261f
MT
3092/* Some of the softmmu routines need to translate from a host pointer
3093 (typically a TLB entry) back to a ram offset. */
3094ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3095{
3096 ram_addr_t ram_addr;
f471a17e 3097
e890261f
MT
3098 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3099 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3100 abort();
3101 }
3102 return ram_addr;
5579c7f3
PB
3103}
3104
c227f099 3105static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3106{
67d3b957 3107#ifdef DEBUG_UNASSIGNED
ab3d1727 3108 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3109#endif
5b450407 3110#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3111 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3112#endif
3113 return 0;
3114}
3115
c227f099 3116static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3117{
3118#ifdef DEBUG_UNASSIGNED
3119 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3120#endif
5b450407 3121#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3122 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3123#endif
3124 return 0;
3125}
3126
c227f099 3127static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3128{
3129#ifdef DEBUG_UNASSIGNED
3130 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3131#endif
5b450407 3132#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3133 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3134#endif
33417e70
FB
3135 return 0;
3136}
3137
c227f099 3138static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3139{
67d3b957 3140#ifdef DEBUG_UNASSIGNED
ab3d1727 3141 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3142#endif
5b450407 3143#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3144 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3145#endif
3146}
3147
c227f099 3148static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3149{
3150#ifdef DEBUG_UNASSIGNED
3151 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3152#endif
5b450407 3153#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3154 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3155#endif
3156}
3157
c227f099 3158static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3159{
3160#ifdef DEBUG_UNASSIGNED
3161 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3162#endif
5b450407 3163#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3164 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3165#endif
33417e70
FB
3166}
3167
d60efc6b 3168static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3169 unassigned_mem_readb,
e18231a3
BS
3170 unassigned_mem_readw,
3171 unassigned_mem_readl,
33417e70
FB
3172};
3173
d60efc6b 3174static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3175 unassigned_mem_writeb,
e18231a3
BS
3176 unassigned_mem_writew,
3177 unassigned_mem_writel,
33417e70
FB
3178};
3179
c227f099 3180static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3181 uint32_t val)
9fa3e853 3182{
3a7d929e 3183 int dirty_flags;
f7c11b53 3184 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3185 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3186#if !defined(CONFIG_USER_ONLY)
3a7d929e 3187 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3188 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3189#endif
3a7d929e 3190 }
5579c7f3 3191 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3192 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3193 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3194 /* we remove the notdirty callback only if the code has been
3195 flushed */
3196 if (dirty_flags == 0xff)
2e70f6ef 3197 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3198}
3199
c227f099 3200static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3201 uint32_t val)
9fa3e853 3202{
3a7d929e 3203 int dirty_flags;
f7c11b53 3204 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3205 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3206#if !defined(CONFIG_USER_ONLY)
3a7d929e 3207 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3208 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3209#endif
3a7d929e 3210 }
5579c7f3 3211 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3212 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3213 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3214 /* we remove the notdirty callback only if the code has been
3215 flushed */
3216 if (dirty_flags == 0xff)
2e70f6ef 3217 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3218}
3219
c227f099 3220static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3221 uint32_t val)
9fa3e853 3222{
3a7d929e 3223 int dirty_flags;
f7c11b53 3224 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3225 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3226#if !defined(CONFIG_USER_ONLY)
3a7d929e 3227 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3228 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3229#endif
3a7d929e 3230 }
5579c7f3 3231 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3232 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3233 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3234 /* we remove the notdirty callback only if the code has been
3235 flushed */
3236 if (dirty_flags == 0xff)
2e70f6ef 3237 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3238}
3239
d60efc6b 3240static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3241 NULL, /* never used */
3242 NULL, /* never used */
3243 NULL, /* never used */
3244};
3245
d60efc6b 3246static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3247 notdirty_mem_writeb,
3248 notdirty_mem_writew,
3249 notdirty_mem_writel,
3250};
3251
0f459d16 3252/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3253static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3254{
3255 CPUState *env = cpu_single_env;
06d55cc1
AL
3256 target_ulong pc, cs_base;
3257 TranslationBlock *tb;
0f459d16 3258 target_ulong vaddr;
a1d1bb31 3259 CPUWatchpoint *wp;
06d55cc1 3260 int cpu_flags;
0f459d16 3261
06d55cc1
AL
3262 if (env->watchpoint_hit) {
3263 /* We re-entered the check after replacing the TB. Now raise
3264 * the debug interrupt so that it will trigger after the
3265 * current instruction. */
3266 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3267 return;
3268 }
2e70f6ef 3269 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3270 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3271 if ((vaddr == (wp->vaddr & len_mask) ||
3272 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3273 wp->flags |= BP_WATCHPOINT_HIT;
3274 if (!env->watchpoint_hit) {
3275 env->watchpoint_hit = wp;
3276 tb = tb_find_pc(env->mem_io_pc);
3277 if (!tb) {
3278 cpu_abort(env, "check_watchpoint: could not find TB for "
3279 "pc=%p", (void *)env->mem_io_pc);
3280 }
618ba8e6 3281 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3282 tb_phys_invalidate(tb, -1);
3283 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3284 env->exception_index = EXCP_DEBUG;
3285 } else {
3286 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3287 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3288 }
3289 cpu_resume_from_signal(env, NULL);
06d55cc1 3290 }
6e140f28
AL
3291 } else {
3292 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3293 }
3294 }
3295}
3296
6658ffb8
PB
3297/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3298 so these check for a hit then pass through to the normal out-of-line
3299 phys routines. */
c227f099 3300static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3301{
b4051334 3302 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3303 return ldub_phys(addr);
3304}
3305
c227f099 3306static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3307{
b4051334 3308 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3309 return lduw_phys(addr);
3310}
3311
c227f099 3312static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3313{
b4051334 3314 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3315 return ldl_phys(addr);
3316}
3317
c227f099 3318static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3319 uint32_t val)
3320{
b4051334 3321 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3322 stb_phys(addr, val);
3323}
3324
c227f099 3325static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3326 uint32_t val)
3327{
b4051334 3328 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3329 stw_phys(addr, val);
3330}
3331
c227f099 3332static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3333 uint32_t val)
3334{
b4051334 3335 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3336 stl_phys(addr, val);
3337}
3338
d60efc6b 3339static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3340 watch_mem_readb,
3341 watch_mem_readw,
3342 watch_mem_readl,
3343};
3344
d60efc6b 3345static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3346 watch_mem_writeb,
3347 watch_mem_writew,
3348 watch_mem_writel,
3349};
6658ffb8 3350
f6405247
RH
3351static inline uint32_t subpage_readlen (subpage_t *mmio,
3352 target_phys_addr_t addr,
3353 unsigned int len)
db7b5426 3354{
f6405247 3355 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3356#if defined(DEBUG_SUBPAGE)
3357 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3358 mmio, len, addr, idx);
3359#endif
db7b5426 3360
f6405247
RH
3361 addr += mmio->region_offset[idx];
3362 idx = mmio->sub_io_index[idx];
acbbec5d 3363 return io_mem_read(idx, addr, 1 <<len);
db7b5426
BS
3364}
3365
c227f099 3366static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3367 uint32_t value, unsigned int len)
db7b5426 3368{
f6405247 3369 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3370#if defined(DEBUG_SUBPAGE)
f6405247
RH
3371 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3372 __func__, mmio, len, addr, idx, value);
db7b5426 3373#endif
f6405247
RH
3374
3375 addr += mmio->region_offset[idx];
3376 idx = mmio->sub_io_index[idx];
acbbec5d 3377 io_mem_write(idx, addr, value, 1 << len);
db7b5426
BS
3378}
3379
c227f099 3380static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3381{
db7b5426
BS
3382 return subpage_readlen(opaque, addr, 0);
3383}
3384
c227f099 3385static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3386 uint32_t value)
3387{
db7b5426
BS
3388 subpage_writelen(opaque, addr, value, 0);
3389}
3390
c227f099 3391static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3392{
db7b5426
BS
3393 return subpage_readlen(opaque, addr, 1);
3394}
3395
c227f099 3396static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3397 uint32_t value)
3398{
db7b5426
BS
3399 subpage_writelen(opaque, addr, value, 1);
3400}
3401
c227f099 3402static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3403{
db7b5426
BS
3404 return subpage_readlen(opaque, addr, 2);
3405}
3406
f6405247
RH
3407static void subpage_writel (void *opaque, target_phys_addr_t addr,
3408 uint32_t value)
db7b5426 3409{
db7b5426
BS
3410 subpage_writelen(opaque, addr, value, 2);
3411}
3412
d60efc6b 3413static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3414 &subpage_readb,
3415 &subpage_readw,
3416 &subpage_readl,
3417};
3418
d60efc6b 3419static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3420 &subpage_writeb,
3421 &subpage_writew,
3422 &subpage_writel,
3423};
3424
56384e8b
AF
3425static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3426{
3427 ram_addr_t raddr = addr;
3428 void *ptr = qemu_get_ram_ptr(raddr);
3429 return ldub_p(ptr);
3430}
3431
3432static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3433 uint32_t value)
3434{
3435 ram_addr_t raddr = addr;
3436 void *ptr = qemu_get_ram_ptr(raddr);
3437 stb_p(ptr, value);
3438}
3439
3440static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3441{
3442 ram_addr_t raddr = addr;
3443 void *ptr = qemu_get_ram_ptr(raddr);
3444 return lduw_p(ptr);
3445}
3446
3447static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3448 uint32_t value)
3449{
3450 ram_addr_t raddr = addr;
3451 void *ptr = qemu_get_ram_ptr(raddr);
3452 stw_p(ptr, value);
3453}
3454
3455static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3456{
3457 ram_addr_t raddr = addr;
3458 void *ptr = qemu_get_ram_ptr(raddr);
3459 return ldl_p(ptr);
3460}
3461
3462static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3463 uint32_t value)
3464{
3465 ram_addr_t raddr = addr;
3466 void *ptr = qemu_get_ram_ptr(raddr);
3467 stl_p(ptr, value);
3468}
3469
3470static CPUReadMemoryFunc * const subpage_ram_read[] = {
3471 &subpage_ram_readb,
3472 &subpage_ram_readw,
3473 &subpage_ram_readl,
3474};
3475
3476static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3477 &subpage_ram_writeb,
3478 &subpage_ram_writew,
3479 &subpage_ram_writel,
3480};
3481
c227f099
AL
3482static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3483 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3484{
3485 int idx, eidx;
3486
3487 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3488 return -1;
3489 idx = SUBPAGE_IDX(start);
3490 eidx = SUBPAGE_IDX(end);
3491#if defined(DEBUG_SUBPAGE)
0bf9e31a 3492 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3493 mmio, start, end, idx, eidx, memory);
3494#endif
56384e8b
AF
3495 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3496 memory = IO_MEM_SUBPAGE_RAM;
3497 }
f6405247 3498 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3499 for (; idx <= eidx; idx++) {
f6405247
RH
3500 mmio->sub_io_index[idx] = memory;
3501 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3502 }
3503
3504 return 0;
3505}
3506
f6405247
RH
3507static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3508 ram_addr_t orig_memory,
3509 ram_addr_t region_offset)
db7b5426 3510{
c227f099 3511 subpage_t *mmio;
db7b5426
BS
3512 int subpage_memory;
3513
7267c094 3514 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3515
3516 mmio->base = base;
be675c97 3517 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3518#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3519 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3520 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3521#endif
1eec614b 3522 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3523 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3524
3525 return mmio;
3526}
3527
88715657
AL
3528static int get_free_io_mem_idx(void)
3529{
3530 int i;
3531
3532 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3533 if (!io_mem_used[i]) {
3534 io_mem_used[i] = 1;
3535 return i;
3536 }
c6703b47 3537 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3538 return -1;
3539}
3540
33417e70
FB
3541/* mem_read and mem_write are arrays of functions containing the
3542 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3543 2). Functions can be omitted with a NULL function pointer.
3ee89922 3544 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3545 modified. If it is zero, a new io zone is allocated. The return
3546 value can be used with cpu_register_physical_memory(). (-1) is
3547 returned on error. */
1eed09cb 3548static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3549 CPUReadMemoryFunc * const *mem_read,
3550 CPUWriteMemoryFunc * const *mem_write,
be675c97 3551 void *opaque)
33417e70 3552{
3cab721d
RH
3553 int i;
3554
33417e70 3555 if (io_index <= 0) {
88715657
AL
3556 io_index = get_free_io_mem_idx();
3557 if (io_index == -1)
3558 return io_index;
33417e70 3559 } else {
1eed09cb 3560 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3561 if (io_index >= IO_MEM_NB_ENTRIES)
3562 return -1;
3563 }
b5ff1b31 3564
3cab721d 3565 for (i = 0; i < 3; ++i) {
acbbec5d 3566 _io_mem_read[io_index][i]
3cab721d
RH
3567 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3568 }
3569 for (i = 0; i < 3; ++i) {
acbbec5d 3570 _io_mem_write[io_index][i]
3cab721d
RH
3571 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3572 }
a4193c8a 3573 io_mem_opaque[io_index] = opaque;
f6405247
RH
3574
3575 return (io_index << IO_MEM_SHIFT);
33417e70 3576}
61382a50 3577
d60efc6b
BS
3578int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3579 CPUWriteMemoryFunc * const *mem_write,
be675c97 3580 void *opaque)
1eed09cb 3581{
be675c97 3582 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
1eed09cb
AK
3583}
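/* Illustrative usage sketch (not part of exec.c; the "mydev" names and
   MyDevState are hypothetical): a device model supplies per-size
   callbacks, registers them, and maps the returned index over its MMIO
   window with cpu_register_physical_memory(). */
#if 0
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    MyDevState *s = opaque;                  /* hypothetical device state */
    return s->regs[addr >> 2];
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    MyDevState *s = opaque;
    s->regs[addr >> 2] = val;
}

/* NULL slots fall back to the unassigned_mem_* handlers. */
static CPUReadMemoryFunc * const mydev_read[] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[] = { NULL, NULL, mydev_writel };

static void mydev_map(MyDevState *s, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, s);
    if (io == -1) {
        return;                              /* io table exhausted */
    }
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
#endif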
3584
88715657
AL
3585void cpu_unregister_io_memory(int io_table_address)
3586{
3587 int i;
3588 int io_index = io_table_address >> IO_MEM_SHIFT;
3589
3590 for (i = 0; i < 3; i++) {
acbbec5d
AK
3591 _io_mem_read[io_index][i] = unassigned_mem_read[i];
3592 _io_mem_write[io_index][i] = unassigned_mem_write[i];
88715657
AL
3593 }
3594 io_mem_opaque[io_index] = NULL;
3595 io_mem_used[io_index] = 0;
3596}
3597
e9179ce1
AK
3598static void io_mem_init(void)
3599{
3600 int i;
3601
2507c12a 3602 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
be675c97 3603 unassigned_mem_write, NULL);
2507c12a 3604 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
be675c97 3605 unassigned_mem_write, NULL);
2507c12a 3606 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
be675c97 3607 notdirty_mem_write, NULL);
56384e8b 3608 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
be675c97 3609 subpage_ram_write, NULL);
e9179ce1
AK
3610 for (i = 0; i < 5; i++)
3611 io_mem_used[i] = 1;
3612
3613 io_mem_watch = cpu_register_io_memory(watch_mem_read,
be675c97 3614 watch_mem_write, NULL);
e9179ce1
AK
3615}
3616
62152b8a
AK
3617static void memory_map_init(void)
3618{
7267c094 3619 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3620 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3621 set_system_memory_map(system_memory);
309cb471 3622
7267c094 3623 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3624 memory_region_init(system_io, "io", 65536);
3625 set_system_io_map(system_io);
62152b8a
AK
3626}
3627
3628MemoryRegion *get_system_memory(void)
3629{
3630 return system_memory;
3631}
3632
309cb471
AK
3633MemoryRegion *get_system_io(void)
3634{
3635 return system_io;
3636}
3637
e2eef170
PB
3638#endif /* !defined(CONFIG_USER_ONLY) */
3639
13eb76e0
FB
3640/* physical memory access (slow version, mainly for debug) */
3641#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3642int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3643 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3644{
3645 int l, flags;
3646 target_ulong page;
53a5960a 3647 void * p;
13eb76e0
FB
3648
3649 while (len > 0) {
3650 page = addr & TARGET_PAGE_MASK;
3651 l = (page + TARGET_PAGE_SIZE) - addr;
3652 if (l > len)
3653 l = len;
3654 flags = page_get_flags(page);
3655 if (!(flags & PAGE_VALID))
a68fe89c 3656 return -1;
13eb76e0
FB
3657 if (is_write) {
3658 if (!(flags & PAGE_WRITE))
a68fe89c 3659 return -1;
579a97f7 3660 /* XXX: this code should not depend on lock_user */
72fb7daa 3661 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3662 return -1;
72fb7daa
AJ
3663 memcpy(p, buf, l);
3664 unlock_user(p, addr, l);
13eb76e0
FB
3665 } else {
3666 if (!(flags & PAGE_READ))
a68fe89c 3667 return -1;
579a97f7 3668 /* XXX: this code should not depend on lock_user */
72fb7daa 3669 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3670 return -1;
72fb7daa 3671 memcpy(buf, p, l);
5b257578 3672 unlock_user(p, addr, 0);
13eb76e0
FB
3673 }
3674 len -= l;
3675 buf += l;
3676 addr += l;
3677 }
a68fe89c 3678 return 0;
13eb76e0 3679}
8df1cd07 3680
13eb76e0 3681#else
c227f099 3682void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3683 int len, int is_write)
3684{
3685 int l, io_index;
3686 uint8_t *ptr;
3687 uint32_t val;
c227f099 3688 target_phys_addr_t page;
8ca5692d 3689 ram_addr_t pd;
f1f6e3b8 3690 PhysPageDesc p;
3b46e624 3691
13eb76e0
FB
3692 while (len > 0) {
3693 page = addr & TARGET_PAGE_MASK;
3694 l = (page + TARGET_PAGE_SIZE) - addr;
3695 if (l > len)
3696 l = len;
92e873b9 3697 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3698 pd = p.phys_offset;
3b46e624 3699
13eb76e0 3700 if (is_write) {
3a7d929e 3701 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
f1f6e3b8 3702 target_phys_addr_t addr1;
13eb76e0 3703 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3704 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6a00d601
FB
3705 /* XXX: could force cpu_single_env to NULL to avoid
3706 potential bugs */
6c2934db 3707 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3708 /* 32 bit write access */
c27004ec 3709 val = ldl_p(buf);
acbbec5d 3710 io_mem_write(io_index, addr1, val, 4);
13eb76e0 3711 l = 4;
6c2934db 3712 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3713 /* 16 bit write access */
c27004ec 3714 val = lduw_p(buf);
acbbec5d 3715 io_mem_write(io_index, addr1, val, 2);
13eb76e0
FB
3716 l = 2;
3717 } else {
1c213d19 3718 /* 8 bit write access */
c27004ec 3719 val = ldub_p(buf);
acbbec5d 3720 io_mem_write(io_index, addr1, val, 1);
13eb76e0
FB
3721 l = 1;
3722 }
3723 } else {
8ca5692d 3724 ram_addr_t addr1;
b448f2f3 3725 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3726 /* RAM case */
5579c7f3 3727 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3728 memcpy(ptr, buf, l);
3a7d929e
FB
3729 if (!cpu_physical_memory_is_dirty(addr1)) {
3730 /* invalidate code */
3731 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3732 /* set dirty bit */
f7c11b53
YT
3733 cpu_physical_memory_set_dirty_flags(
3734 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3735 }
050a0ddf 3736 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3737 }
3738 } else {
1d393fa2 3739 if (!is_ram_rom_romd(pd)) {
f1f6e3b8 3740 target_phys_addr_t addr1;
13eb76e0
FB
3741 /* I/O case */
3742 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3743 addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
6c2934db 3744 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3745 /* 32 bit read access */
acbbec5d 3746 val = io_mem_read(io_index, addr1, 4);
c27004ec 3747 stl_p(buf, val);
13eb76e0 3748 l = 4;
6c2934db 3749 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3750 /* 16 bit read access */
acbbec5d 3751 val = io_mem_read(io_index, addr1, 2);
c27004ec 3752 stw_p(buf, val);
13eb76e0
FB
3753 l = 2;
3754 } else {
1c213d19 3755 /* 8 bit read access */
acbbec5d 3756 val = io_mem_read(io_index, addr1, 1);
c27004ec 3757 stb_p(buf, val);
13eb76e0
FB
3758 l = 1;
3759 }
3760 } else {
3761 /* RAM case */
050a0ddf
AP
3762 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3763 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3764 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3765 }
3766 }
3767 len -= l;
3768 buf += l;
3769 addr += l;
3770 }
3771}
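/* Illustrative sketch (not part of exec.c): callers normally use the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which
   invoke cpu_physical_memory_rw() with is_write set to 0 or 1. */
#if 0
static uint32_t peek_guest_long(target_phys_addr_t gpa)
{
    uint8_t buf[4];

    cpu_physical_memory_read(gpa, buf, sizeof(buf));
    return ldl_p(buf);                       /* host-endian view of the bytes */
}

static void poke_guest_long(target_phys_addr_t gpa, uint32_t val)
{
    uint8_t buf[4];

    stl_p(buf, val);
    cpu_physical_memory_write(gpa, buf, sizeof(buf));
}
#endif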
8df1cd07 3772
d0ecd2aa 3773/* used for ROM loading: can write to RAM and ROM */
c227f099 3774void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3775 const uint8_t *buf, int len)
3776{
3777 int l;
3778 uint8_t *ptr;
c227f099 3779 target_phys_addr_t page;
d0ecd2aa 3780 unsigned long pd;
f1f6e3b8 3781 PhysPageDesc p;
3b46e624 3782
d0ecd2aa
FB
3783 while (len > 0) {
3784 page = addr & TARGET_PAGE_MASK;
3785 l = (page + TARGET_PAGE_SIZE) - addr;
3786 if (l > len)
3787 l = len;
3788 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3789 pd = p.phys_offset;
3b46e624 3790
1d393fa2 3791 if (!is_ram_rom_romd(pd)) {
d0ecd2aa
FB
3792 /* do nothing */
3793 } else {
3794 unsigned long addr1;
3795 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3796 /* ROM/RAM case */
5579c7f3 3797 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3798 memcpy(ptr, buf, l);
050a0ddf 3799 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3800 }
3801 len -= l;
3802 buf += l;
3803 addr += l;
3804 }
3805}
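/* Illustrative sketch (hypothetical helper): firmware loaders use this
   routine because a plain cpu_physical_memory_write() to a ROM page goes
   through the unassigned handler and is discarded. */
#if 0
static void copy_blob_to_rom(target_phys_addr_t base,
                             const uint8_t *blob, int size)
{
    cpu_physical_memory_write_rom(base, blob, size);
}
#endif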
3806
6d16c2f8
AL
3807typedef struct {
3808 void *buffer;
c227f099
AL
3809 target_phys_addr_t addr;
3810 target_phys_addr_t len;
6d16c2f8
AL
3811} BounceBuffer;
3812
3813static BounceBuffer bounce;
3814
ba223c29
AL
3815typedef struct MapClient {
3816 void *opaque;
3817 void (*callback)(void *opaque);
72cf2d4f 3818 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3819} MapClient;
3820
72cf2d4f
BS
3821static QLIST_HEAD(map_client_list, MapClient) map_client_list
3822 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3823
3824void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3825{
7267c094 3826 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
3827
3828 client->opaque = opaque;
3829 client->callback = callback;
72cf2d4f 3830 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3831 return client;
3832}
3833
3834void cpu_unregister_map_client(void *_client)
3835{
3836 MapClient *client = (MapClient *)_client;
3837
72cf2d4f 3838 QLIST_REMOVE(client, link);
7267c094 3839 g_free(client);
ba223c29
AL
3840}
3841
3842static void cpu_notify_map_clients(void)
3843{
3844 MapClient *client;
3845
72cf2d4f
BS
3846 while (!QLIST_EMPTY(&map_client_list)) {
3847 client = QLIST_FIRST(&map_client_list);
ba223c29 3848 client->callback(client->opaque);
34d5e948 3849 cpu_unregister_map_client(client);
ba223c29
AL
3850 }
3851}
3852
6d16c2f8
AL
3853/* Map a physical memory region into a host virtual address.
3854 * May map a subset of the requested range, given by and returned in *plen.
3855 * May return NULL if resources needed to perform the mapping are exhausted.
3856 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3857 * Use cpu_register_map_client() to know when retrying the map operation is
3858 * likely to succeed.
6d16c2f8 3859 */
c227f099
AL
3860void *cpu_physical_memory_map(target_phys_addr_t addr,
3861 target_phys_addr_t *plen,
6d16c2f8
AL
3862 int is_write)
3863{
c227f099 3864 target_phys_addr_t len = *plen;
38bee5dc 3865 target_phys_addr_t todo = 0;
6d16c2f8 3866 int l;
c227f099 3867 target_phys_addr_t page;
6d16c2f8 3868 unsigned long pd;
f1f6e3b8 3869 PhysPageDesc p;
f15fbc4b 3870 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
3871 ram_addr_t rlen;
3872 void *ret;
6d16c2f8
AL
3873
3874 while (len > 0) {
3875 page = addr & TARGET_PAGE_MASK;
3876 l = (page + TARGET_PAGE_SIZE) - addr;
3877 if (l > len)
3878 l = len;
3879 p = phys_page_find(page >> TARGET_PAGE_BITS);
f1f6e3b8 3880 pd = p.phys_offset;
6d16c2f8
AL
3881
3882 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 3883 if (todo || bounce.buffer) {
6d16c2f8
AL
3884 break;
3885 }
3886 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3887 bounce.addr = addr;
3888 bounce.len = l;
3889 if (!is_write) {
54f7b4a3 3890 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 3891 }
38bee5dc
SS
3892
3893 *plen = l;
3894 return bounce.buffer;
6d16c2f8 3895 }
8ab934f9
SS
3896 if (!todo) {
3897 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3898 }
6d16c2f8
AL
3899
3900 len -= l;
3901 addr += l;
38bee5dc 3902 todo += l;
6d16c2f8 3903 }
8ab934f9
SS
3904 rlen = todo;
3905 ret = qemu_ram_ptr_length(raddr, &rlen);
3906 *plen = rlen;
3907 return ret;
6d16c2f8
AL
3908}
3909
3910/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3911 * Will also mark the memory as dirty if is_write == 1. access_len gives
3912 * the amount of memory that was actually read or written by the caller.
3913 */
c227f099
AL
3914void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3915 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
3916{
3917 if (buffer != bounce.buffer) {
3918 if (is_write) {
e890261f 3919 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
3920 while (access_len) {
3921 unsigned l;
3922 l = TARGET_PAGE_SIZE;
3923 if (l > access_len)
3924 l = access_len;
3925 if (!cpu_physical_memory_is_dirty(addr1)) {
3926 /* invalidate code */
3927 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3928 /* set dirty bit */
f7c11b53
YT
3929 cpu_physical_memory_set_dirty_flags(
3930 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
3931 }
3932 addr1 += l;
3933 access_len -= l;
3934 }
3935 }
868bb33f 3936 if (xen_enabled()) {
e41d7c69 3937 xen_invalidate_map_cache_entry(buffer);
050a0ddf 3938 }
6d16c2f8
AL
3939 return;
3940 }
3941 if (is_write) {
3942 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3943 }
f8a83245 3944 qemu_vfree(bounce.buffer);
6d16c2f8 3945 bounce.buffer = NULL;
ba223c29 3946 cpu_notify_map_clients();
6d16c2f8 3947}
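/* Illustrative usage sketch (not part of exec.c; retry_dma_cb and
   dma_write_to_guest are hypothetical): the zero-copy DMA pattern built
   on cpu_physical_memory_map()/unmap().  Non-RAM regions are served by
   the single bounce buffer, and once it is busy the map call returns
   NULL, so callers register a map client and retry later. */
#if 0
static void retry_dma_cb(void *opaque)
{
    /* re-issue the pending DMA request from here */
}

static void dma_write_to_guest(target_phys_addr_t addr,
                               const void *data, target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* bounce buffer already in use: ask to be notified, retry later */
        cpu_register_map_client(NULL /* opaque */, retry_dma_cb);
        return;
    }
    memcpy(host, data, plen);                /* plen may be less than size */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif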
d0ecd2aa 3948
8df1cd07 3949/* warning: addr must be aligned */
1e78bcc1
AG
3950static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3951 enum device_endian endian)
8df1cd07
FB
3952{
3953 int io_index;
3954 uint8_t *ptr;
3955 uint32_t val;
3956 unsigned long pd;
f1f6e3b8 3957 PhysPageDesc p;
8df1cd07
FB
3958
3959 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 3960 pd = p.phys_offset;
3b46e624 3961
1d393fa2 3962 if (!is_ram_rom_romd(pd)) {
8df1cd07
FB
3963 /* I/O case */
3964 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 3965 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 3966 val = io_mem_read(io_index, addr, 4);
1e78bcc1
AG
3967#if defined(TARGET_WORDS_BIGENDIAN)
3968 if (endian == DEVICE_LITTLE_ENDIAN) {
3969 val = bswap32(val);
3970 }
3971#else
3972 if (endian == DEVICE_BIG_ENDIAN) {
3973 val = bswap32(val);
3974 }
3975#endif
8df1cd07
FB
3976 } else {
3977 /* RAM case */
5579c7f3 3978 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3979 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
3980 switch (endian) {
3981 case DEVICE_LITTLE_ENDIAN:
3982 val = ldl_le_p(ptr);
3983 break;
3984 case DEVICE_BIG_ENDIAN:
3985 val = ldl_be_p(ptr);
3986 break;
3987 default:
3988 val = ldl_p(ptr);
3989 break;
3990 }
8df1cd07
FB
3991 }
3992 return val;
3993}
3994
1e78bcc1
AG
3995uint32_t ldl_phys(target_phys_addr_t addr)
3996{
3997 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3998}
3999
4000uint32_t ldl_le_phys(target_phys_addr_t addr)
4001{
4002 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4003}
4004
4005uint32_t ldl_be_phys(target_phys_addr_t addr)
4006{
4007 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4008}
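/* Illustrative sketch: device models use the fixed-endian variants when
   the in-memory layout is defined by the device rather than the target,
   e.g. a descriptor word that is always little-endian. */
#if 0
static uint32_t read_desc_flags(target_phys_addr_t desc_addr)
{
    return ldl_le_phys(desc_addr);           /* descriptor is little-endian */
}
#endif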
4009
84b7b8e7 4010/* warning: addr must be aligned */
1e78bcc1
AG
4011static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4012 enum device_endian endian)
84b7b8e7
FB
4013{
4014 int io_index;
4015 uint8_t *ptr;
4016 uint64_t val;
4017 unsigned long pd;
f1f6e3b8 4018 PhysPageDesc p;
84b7b8e7
FB
4019
4020 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4021 pd = p.phys_offset;
3b46e624 4022
1d393fa2 4023 if (!is_ram_rom_romd(pd)) {
84b7b8e7
FB
4024 /* I/O case */
4025 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4026 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4027
4028 /* XXX This is broken when device endian != cpu endian.
4029 Fix and add "endian" variable check */
84b7b8e7 4030#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4031 val = io_mem_read(io_index, addr, 4) << 32;
4032 val |= io_mem_read(io_index, addr + 4, 4);
84b7b8e7 4033#else
acbbec5d
AK
4034 val = io_mem_read(io_index, addr, 4);
4035 val |= io_mem_read(io_index, addr + 4, 4) << 32;
84b7b8e7
FB
4036#endif
4037 } else {
4038 /* RAM case */
5579c7f3 4039 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4040 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4041 switch (endian) {
4042 case DEVICE_LITTLE_ENDIAN:
4043 val = ldq_le_p(ptr);
4044 break;
4045 case DEVICE_BIG_ENDIAN:
4046 val = ldq_be_p(ptr);
4047 break;
4048 default:
4049 val = ldq_p(ptr);
4050 break;
4051 }
84b7b8e7
FB
4052 }
4053 return val;
4054}
4055
1e78bcc1
AG
4056uint64_t ldq_phys(target_phys_addr_t addr)
4057{
4058 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4059}
4060
4061uint64_t ldq_le_phys(target_phys_addr_t addr)
4062{
4063 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4064}
4065
4066uint64_t ldq_be_phys(target_phys_addr_t addr)
4067{
4068 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4069}
4070
aab33094 4071/* XXX: optimize */
c227f099 4072uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4073{
4074 uint8_t val;
4075 cpu_physical_memory_read(addr, &val, 1);
4076 return val;
4077}
4078
733f0b02 4079/* warning: addr must be aligned */
1e78bcc1
AG
4080static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4081 enum device_endian endian)
aab33094 4082{
733f0b02
MT
4083 int io_index;
4084 uint8_t *ptr;
4085 uint64_t val;
4086 unsigned long pd;
f1f6e3b8 4087 PhysPageDesc p;
733f0b02
MT
4088
4089 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4090 pd = p.phys_offset;
733f0b02 4091
1d393fa2 4092 if (!is_ram_rom_romd(pd)) {
733f0b02
MT
4093 /* I/O case */
4094 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4095 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4096 val = io_mem_read(io_index, addr, 2);
1e78bcc1
AG
4097#if defined(TARGET_WORDS_BIGENDIAN)
4098 if (endian == DEVICE_LITTLE_ENDIAN) {
4099 val = bswap16(val);
4100 }
4101#else
4102 if (endian == DEVICE_BIG_ENDIAN) {
4103 val = bswap16(val);
4104 }
4105#endif
733f0b02
MT
4106 } else {
4107 /* RAM case */
4108 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4109 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4110 switch (endian) {
4111 case DEVICE_LITTLE_ENDIAN:
4112 val = lduw_le_p(ptr);
4113 break;
4114 case DEVICE_BIG_ENDIAN:
4115 val = lduw_be_p(ptr);
4116 break;
4117 default:
4118 val = lduw_p(ptr);
4119 break;
4120 }
733f0b02
MT
4121 }
4122 return val;
aab33094
FB
4123}
4124
1e78bcc1
AG
4125uint32_t lduw_phys(target_phys_addr_t addr)
4126{
4127 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4128}
4129
4130uint32_t lduw_le_phys(target_phys_addr_t addr)
4131{
4132 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4133}
4134
4135uint32_t lduw_be_phys(target_phys_addr_t addr)
4136{
4137 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4138}
4139
8df1cd07
FB
4140/* warning: addr must be aligned. The ram page is not marked as dirty
4141 and the code inside is not invalidated. It is useful if the dirty
4142 bits are used to track modified PTEs */
c227f099 4143void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4144{
4145 int io_index;
4146 uint8_t *ptr;
4147 unsigned long pd;
f1f6e3b8 4148 PhysPageDesc p;
8df1cd07
FB
4149
4150 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4151 pd = p.phys_offset;
3b46e624 4152
3a7d929e 4153 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4154 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4155 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
acbbec5d 4156 io_mem_write(io_index, addr, val, 4);
8df1cd07 4157 } else {
74576198 4158 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4159 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4160 stl_p(ptr, val);
74576198
AL
4161
4162 if (unlikely(in_migration)) {
4163 if (!cpu_physical_memory_is_dirty(addr1)) {
4164 /* invalidate code */
4165 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4166 /* set dirty bit */
f7c11b53
YT
4167 cpu_physical_memory_set_dirty_flags(
4168 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4169 }
4170 }
8df1cd07
FB
4171 }
4172}
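/* Illustrative sketch (the PTE layout is hypothetical): a target MMU
   helper can update accessed/dirty bits in a guest page-table entry with
   the _notdirty variant, so the page holding the PTE is neither marked
   dirty nor has its translated code invalidated. */
#if 0
static void set_pte_accessed(target_phys_addr_t pte_addr, uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20); /* hypothetical ACCESSED bit */
}
#endif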
4173
c227f099 4174void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4175{
4176 int io_index;
4177 uint8_t *ptr;
4178 unsigned long pd;
f1f6e3b8 4179 PhysPageDesc p;
bc98a7ef
JM
4180
4181 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4182 pd = p.phys_offset;
3b46e624 4183
bc98a7ef
JM
4184 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4185 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4186 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
bc98a7ef 4187#ifdef TARGET_WORDS_BIGENDIAN
acbbec5d
AK
4188 io_mem_write(io_index, addr, val >> 32, 4);
4189 io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
bc98a7ef 4190#else
acbbec5d
AK
4191 io_mem_write(io_index, addr, (uint32_t)val, 4);
4192 io_mem_write(io_index, addr + 4, val >> 32, 4);
bc98a7ef
JM
4193#endif
4194 } else {
5579c7f3 4195 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4196 (addr & ~TARGET_PAGE_MASK);
4197 stq_p(ptr, val);
4198 }
4199}
4200
8df1cd07 4201/* warning: addr must be aligned */
1e78bcc1
AG
4202static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4203 enum device_endian endian)
8df1cd07
FB
4204{
4205 int io_index;
4206 uint8_t *ptr;
4207 unsigned long pd;
f1f6e3b8 4208 PhysPageDesc p;
8df1cd07
FB
4209
4210 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4211 pd = p.phys_offset;
3b46e624 4212
3a7d929e 4213 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4214 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4215 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4216#if defined(TARGET_WORDS_BIGENDIAN)
4217 if (endian == DEVICE_LITTLE_ENDIAN) {
4218 val = bswap32(val);
4219 }
4220#else
4221 if (endian == DEVICE_BIG_ENDIAN) {
4222 val = bswap32(val);
4223 }
4224#endif
acbbec5d 4225 io_mem_write(io_index, addr, val, 4);
8df1cd07
FB
4226 } else {
4227 unsigned long addr1;
4228 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4229 /* RAM case */
5579c7f3 4230 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4231 switch (endian) {
4232 case DEVICE_LITTLE_ENDIAN:
4233 stl_le_p(ptr, val);
4234 break;
4235 case DEVICE_BIG_ENDIAN:
4236 stl_be_p(ptr, val);
4237 break;
4238 default:
4239 stl_p(ptr, val);
4240 break;
4241 }
3a7d929e
FB
4242 if (!cpu_physical_memory_is_dirty(addr1)) {
4243 /* invalidate code */
4244 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4245 /* set dirty bit */
f7c11b53
YT
4246 cpu_physical_memory_set_dirty_flags(addr1,
4247 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4248 }
8df1cd07
FB
4249 }
4250}
4251
1e78bcc1
AG
4252void stl_phys(target_phys_addr_t addr, uint32_t val)
4253{
4254 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4255}
4256
4257void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4258{
4259 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4260}
4261
4262void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4263{
4264 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4265}
4266
aab33094 4267/* XXX: optimize */
c227f099 4268void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4269{
4270 uint8_t v = val;
4271 cpu_physical_memory_write(addr, &v, 1);
4272}
4273
733f0b02 4274/* warning: addr must be aligned */
1e78bcc1
AG
4275static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4276 enum device_endian endian)
aab33094 4277{
733f0b02
MT
4278 int io_index;
4279 uint8_t *ptr;
4280 unsigned long pd;
f1f6e3b8 4281 PhysPageDesc p;
733f0b02
MT
4282
4283 p = phys_page_find(addr >> TARGET_PAGE_BITS);
f1f6e3b8 4284 pd = p.phys_offset;
733f0b02
MT
4285
4286 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4287 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
f1f6e3b8 4288 addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
1e78bcc1
AG
4289#if defined(TARGET_WORDS_BIGENDIAN)
4290 if (endian == DEVICE_LITTLE_ENDIAN) {
4291 val = bswap16(val);
4292 }
4293#else
4294 if (endian == DEVICE_BIG_ENDIAN) {
4295 val = bswap16(val);
4296 }
4297#endif
acbbec5d 4298 io_mem_write(io_index, addr, val, 2);
733f0b02
MT
4299 } else {
4300 unsigned long addr1;
4301 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4302 /* RAM case */
4303 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4304 switch (endian) {
4305 case DEVICE_LITTLE_ENDIAN:
4306 stw_le_p(ptr, val);
4307 break;
4308 case DEVICE_BIG_ENDIAN:
4309 stw_be_p(ptr, val);
4310 break;
4311 default:
4312 stw_p(ptr, val);
4313 break;
4314 }
733f0b02
MT
4315 if (!cpu_physical_memory_is_dirty(addr1)) {
4316 /* invalidate code */
4317 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4318 /* set dirty bit */
4319 cpu_physical_memory_set_dirty_flags(addr1,
4320 (0xff & ~CODE_DIRTY_FLAG));
4321 }
4322 }
aab33094
FB
4323}
4324
1e78bcc1
AG
4325void stw_phys(target_phys_addr_t addr, uint32_t val)
4326{
4327 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4328}
4329
4330void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4331{
4332 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4333}
4334
4335void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4336{
4337 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4338}
4339
aab33094 4340/* XXX: optimize */
c227f099 4341void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4342{
4343 val = tswap64(val);
71d2b725 4344 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4345}
4346
1e78bcc1
AG
4347void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4348{
4349 val = cpu_to_le64(val);
4350 cpu_physical_memory_write(addr, &val, 8);
4351}
4352
4353void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4354{
4355 val = cpu_to_be64(val);
4356 cpu_physical_memory_write(addr, &val, 8);
4357}
4358
5e2972fd 4359/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4360int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4361 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4362{
4363 int l;
c227f099 4364 target_phys_addr_t phys_addr;
9b3c35e0 4365 target_ulong page;
13eb76e0
FB
4366
4367 while (len > 0) {
4368 page = addr & TARGET_PAGE_MASK;
4369 phys_addr = cpu_get_phys_page_debug(env, page);
4370 /* if no physical page mapped, return an error */
4371 if (phys_addr == -1)
4372 return -1;
4373 l = (page + TARGET_PAGE_SIZE) - addr;
4374 if (l > len)
4375 l = len;
5e2972fd 4376 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4377 if (is_write)
4378 cpu_physical_memory_write_rom(phys_addr, buf, l);
4379 else
5e2972fd 4380 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4381 len -= l;
4382 buf += l;
4383 addr += l;
4384 }
4385 return 0;
4386}
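/* Illustrative sketch (gdb_read_guest is hypothetical): debug stubs read
   guest virtual memory through this helper, which translates each page
   with cpu_get_phys_page_debug() and then uses the physical accessors. */
#if 0
static int gdb_read_guest(CPUState *env, target_ulong vaddr,
                          uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* read */);
}
#endif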
a68fe89c 4387#endif
13eb76e0 4388
2e70f6ef
PB
4389/* in deterministic execution mode, instructions doing device I/Os
4390 must be at the end of the TB */
4391void cpu_io_recompile(CPUState *env, void *retaddr)
4392{
4393 TranslationBlock *tb;
4394 uint32_t n, cflags;
4395 target_ulong pc, cs_base;
4396 uint64_t flags;
4397
4398 tb = tb_find_pc((unsigned long)retaddr);
4399 if (!tb) {
4400 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4401 retaddr);
4402 }
4403 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4404 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4405 /* Calculate how many instructions had been executed before the fault
bf20dc07 4406 occurred. */
2e70f6ef
PB
4407 n = n - env->icount_decr.u16.low;
4408 /* Generate a new TB ending on the I/O insn. */
4409 n++;
4410 /* On MIPS and SH, delay slot instructions can only be restarted if
4411 they were already the first instruction in the TB. If this is not
bf20dc07 4412 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4413 branch. */
4414#if defined(TARGET_MIPS)
4415 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4416 env->active_tc.PC -= 4;
4417 env->icount_decr.u16.low++;
4418 env->hflags &= ~MIPS_HFLAG_BMASK;
4419 }
4420#elif defined(TARGET_SH4)
4421 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4422 && n > 1) {
4423 env->pc -= 2;
4424 env->icount_decr.u16.low++;
4425 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4426 }
4427#endif
4428 /* This should never happen. */
4429 if (n > CF_COUNT_MASK)
4430 cpu_abort(env, "TB too big during recompile");
4431
4432 cflags = n | CF_LAST_IO;
4433 pc = tb->pc;
4434 cs_base = tb->cs_base;
4435 flags = tb->flags;
4436 tb_phys_invalidate(tb, -1);
4437 /* FIXME: In theory this could raise an exception. In practice
4438 we have already translated the block once so it's probably ok. */
4439 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4440 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4441 the first in the TB) then we end up generating a whole new TB and
4442 repeating the fault, which is horribly inefficient.
4443 Better would be to execute just this insn uncached, or generate a
4444 second new TB. */
4445 cpu_resume_from_signal(env, NULL);
4446}
4447
b3755a91
PB
4448#if !defined(CONFIG_USER_ONLY)
4449
055403b2 4450void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4451{
4452 int i, target_code_size, max_target_code_size;
4453 int direct_jmp_count, direct_jmp2_count, cross_page;
4454 TranslationBlock *tb;
3b46e624 4455
e3db7226
FB
4456 target_code_size = 0;
4457 max_target_code_size = 0;
4458 cross_page = 0;
4459 direct_jmp_count = 0;
4460 direct_jmp2_count = 0;
4461 for(i = 0; i < nb_tbs; i++) {
4462 tb = &tbs[i];
4463 target_code_size += tb->size;
4464 if (tb->size > max_target_code_size)
4465 max_target_code_size = tb->size;
4466 if (tb->page_addr[1] != -1)
4467 cross_page++;
4468 if (tb->tb_next_offset[0] != 0xffff) {
4469 direct_jmp_count++;
4470 if (tb->tb_next_offset[1] != 0xffff) {
4471 direct_jmp2_count++;
4472 }
4473 }
4474 }
4475 /* XXX: avoid using doubles ? */
57fec1fe 4476 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4477 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4478 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4479 cpu_fprintf(f, "TB count %d/%d\n",
4480 nb_tbs, code_gen_max_blocks);
5fafdf24 4481 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4482 nb_tbs ? target_code_size / nb_tbs : 0,
4483 max_target_code_size);
055403b2 4484 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4485 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4486 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4487 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4488 cross_page,
e3db7226
FB
4489 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4490 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4491 direct_jmp_count,
e3db7226
FB
4492 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4493 direct_jmp2_count,
4494 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4495 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4496 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4497 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4498 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4499 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4500}
4501
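/* The includes below instantiate softmmu_template.h with
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu for 1-, 2-, 4- and 8-byte
   accesses, producing the code-fetch variants of the softmmu load
   helpers used when the translator reads target instructions. */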
61382a50 4502#define MMUSUFFIX _cmmu
3917149d 4503#undef GETPC
61382a50
FB
4504#define GETPC() NULL
4505#define env cpu_single_env
b769d8fe 4506#define SOFTMMU_CODE_ACCESS
61382a50
FB
4507
4508#define SHIFT 0
4509#include "softmmu_template.h"
4510
4511#define SHIFT 1
4512#include "softmmu_template.h"
4513
4514#define SHIFT 2
4515#include "softmmu_template.h"
4516
4517#define SHIFT 3
4518#include "softmmu_template.h"
4519
4520#undef env
4521
4522#endif