]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
Support running QEMU on Valgrind
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
fd6ce8f6 60//#define DEBUG_TB_INVALIDATE
66e85a21 61//#define DEBUG_FLUSH
9fa3e853 62//#define DEBUG_TLB
67d3b957 63//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
64
65/* make various TB consistency checks */
5fafdf24
TS
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
fd6ce8f6 68
1196be37 69//#define DEBUG_IOPORT
db7b5426 70//#define DEBUG_SUBPAGE
1196be37 71
99773bd4
PB
72#if !defined(CONFIG_USER_ONLY)
73/* TB consistency checks only implemented for usermode emulation. */
74#undef DEBUG_TB_CHECK
75#endif
76
9fa3e853
FB
77#define SMC_BITMAP_USE_THRESHOLD 10
78
bdaf78e0 79static TranslationBlock *tbs;
24ab68ac 80static int code_gen_max_blocks;
9fa3e853 81TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 82static int nb_tbs;
eb51d102 83/* any access to the tbs or the page table must use this lock */
c227f099 84spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 85
141ac468
BS
86#if defined(__arm__) || defined(__sparc_v9__)
87/* The prologue must be reachable with a direct jump. ARM and Sparc64
88 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
89 section close to code segment. */
90#define code_gen_section \
91 __attribute__((__section__(".gen_code"))) \
92 __attribute__((aligned (32)))
f8e2af11
SW
93#elif defined(_WIN32)
94/* Maximum alignment for Win32 is 16. */
95#define code_gen_section \
96 __attribute__((aligned (16)))
d03d860b
BS
97#else
98#define code_gen_section \
99 __attribute__((aligned (32)))
100#endif
101
102uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
103static uint8_t *code_gen_buffer;
104static unsigned long code_gen_buffer_size;
26a5f13b 105/* threshold to flush the translated code buffer */
bdaf78e0 106static unsigned long code_gen_buffer_max_size;
24ab68ac 107static uint8_t *code_gen_ptr;
fd6ce8f6 108
e2eef170 109#if !defined(CONFIG_USER_ONLY)
9fa3e853 110int phys_ram_fd;
74576198 111static int in_migration;
94a6b54f 112
85d59fef 113RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
114
115static MemoryRegion *system_memory;
309cb471 116static MemoryRegion *system_io;
62152b8a 117
e2eef170 118#endif
9fa3e853 119
6a00d601
FB
120CPUState *first_cpu;
121/* current CPU in the current thread. It is only valid inside
122 cpu_exec() */
5fafdf24 123CPUState *cpu_single_env;
2e70f6ef 124/* 0 = Do not count executed instructions.
bf20dc07 125 1 = Precise instruction counting.
2e70f6ef
PB
126 2 = Adaptive rate instruction counting. */
127int use_icount = 0;
6a00d601 128
54936004 129typedef struct PageDesc {
92e873b9 130 /* list of TBs intersecting this ram page */
fd6ce8f6 131 TranslationBlock *first_tb;
9fa3e853
FB
132 /* in order to optimize self modifying code, we count the number
133 of lookups we do to a given page to use a bitmap */
134 unsigned int code_write_count;
135 uint8_t *code_bitmap;
136#if defined(CONFIG_USER_ONLY)
137 unsigned long flags;
138#endif
54936004
FB
139} PageDesc;
140
41c1b1c9 141/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
142 while in user mode we want it to be based on virtual addresses. */
143#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
144#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
145# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
146#else
5cd2c5b6 147# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 148#endif
bedb69ea 149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 151#endif
54936004 152
5cd2c5b6
RH
153/* Size of the L2 (and L3, etc) page tables. */
154#define L2_BITS 10
54936004
FB
155#define L2_SIZE (1 << L2_BITS)
156
5cd2c5b6
RH
157/* The bits remaining after N lower levels of page tables. */
158#define P_L1_BITS_REM \
159 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160#define V_L1_BITS_REM \
161 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162
163/* Size of the L1 page table. Avoid silly small sizes. */
164#if P_L1_BITS_REM < 4
165#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
166#else
167#define P_L1_BITS P_L1_BITS_REM
168#endif
169
170#if V_L1_BITS_REM < 4
171#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172#else
173#define V_L1_BITS V_L1_BITS_REM
174#endif
175
176#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
177#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
178
179#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
180#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
181
83fb7adf 182unsigned long qemu_real_host_page_size;
83fb7adf
FB
183unsigned long qemu_host_page_size;
184unsigned long qemu_host_page_mask;
54936004 185
5cd2c5b6
RH
186/* This is a multi-level map on the virtual address space.
187 The bottom level has pointers to PageDesc. */
188static void *l1_map[V_L1_SIZE];
54936004 189
e2eef170 190#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
191typedef struct PhysPageDesc {
192 /* offset in host memory of the page + io_index in the low bits */
193 ram_addr_t phys_offset;
194 ram_addr_t region_offset;
195} PhysPageDesc;
196
5cd2c5b6
RH
197/* This is a multi-level map on the physical address space.
198 The bottom level has pointers to PhysPageDesc. */
199static void *l1_phys_map[P_L1_SIZE];
6d9a1304 200
e2eef170 201static void io_mem_init(void);
62152b8a 202static void memory_map_init(void);
e2eef170 203
33417e70 204/* io memory support */
33417e70
FB
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 208static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
209static int io_mem_watch;
210#endif
33417e70 211
34865134 212/* log support */
1e8b27ca
JR
213#ifdef WIN32
214static const char *logfilename = "qemu.log";
215#else
d9b630fd 216static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 217#endif
34865134
FB
218FILE *logfile;
219int loglevel;
e735b91c 220static int log_append = 0;
34865134 221
e3db7226 222/* statistics */
b3755a91 223#if !defined(CONFIG_USER_ONLY)
e3db7226 224static int tlb_flush_count;
b3755a91 225#endif
e3db7226
FB
226static int tb_flush_count;
227static int tb_phys_invalidate_count;
228
7cb69cae
FB
229#ifdef _WIN32
230static void map_exec(void *addr, long size)
231{
232 DWORD old_protect;
233 VirtualProtect(addr, size,
234 PAGE_EXECUTE_READWRITE, &old_protect);
235
236}
237#else
238static void map_exec(void *addr, long size)
239{
4369415f 240 unsigned long start, end, page_size;
7cb69cae 241
4369415f 242 page_size = getpagesize();
7cb69cae 243 start = (unsigned long)addr;
4369415f 244 start &= ~(page_size - 1);
7cb69cae
FB
245
246 end = (unsigned long)addr + size;
4369415f
FB
247 end += page_size - 1;
248 end &= ~(page_size - 1);
7cb69cae
FB
249
250 mprotect((void *)start, end - start,
251 PROT_READ | PROT_WRITE | PROT_EXEC);
252}
253#endif
254
b346ff46 255static void page_init(void)
54936004 256{
83fb7adf 257 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 258 TARGET_PAGE_SIZE */
c2b48b69
AL
259#ifdef _WIN32
260 {
261 SYSTEM_INFO system_info;
262
263 GetSystemInfo(&system_info);
264 qemu_real_host_page_size = system_info.dwPageSize;
265 }
266#else
267 qemu_real_host_page_size = getpagesize();
268#endif
83fb7adf
FB
269 if (qemu_host_page_size == 0)
270 qemu_host_page_size = qemu_real_host_page_size;
271 if (qemu_host_page_size < TARGET_PAGE_SIZE)
272 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 274
2e9a5713 275#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 276 {
f01576f1
JL
277#ifdef HAVE_KINFO_GETVMMAP
278 struct kinfo_vmentry *freep;
279 int i, cnt;
280
281 freep = kinfo_getvmmap(getpid(), &cnt);
282 if (freep) {
283 mmap_lock();
284 for (i = 0; i < cnt; i++) {
285 unsigned long startaddr, endaddr;
286
287 startaddr = freep[i].kve_start;
288 endaddr = freep[i].kve_end;
289 if (h2g_valid(startaddr)) {
290 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291
292 if (h2g_valid(endaddr)) {
293 endaddr = h2g(endaddr);
fd436907 294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
295 } else {
296#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297 endaddr = ~0ul;
fd436907 298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
299#endif
300 }
301 }
302 }
303 free(freep);
304 mmap_unlock();
305 }
306#else
50a9569b 307 FILE *f;
50a9569b 308
0776590d 309 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 310
fd436907 311 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 312 if (f) {
5cd2c5b6
RH
313 mmap_lock();
314
50a9569b 315 do {
5cd2c5b6
RH
316 unsigned long startaddr, endaddr;
317 int n;
318
319 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
320
321 if (n == 2 && h2g_valid(startaddr)) {
322 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
323
324 if (h2g_valid(endaddr)) {
325 endaddr = h2g(endaddr);
326 } else {
327 endaddr = ~0ul;
328 }
329 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
330 }
331 } while (!feof(f));
5cd2c5b6 332
50a9569b 333 fclose(f);
5cd2c5b6 334 mmap_unlock();
50a9569b 335 }
f01576f1 336#endif
50a9569b
AZ
337 }
338#endif
54936004
FB
339}
340
41c1b1c9 341static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 342{
41c1b1c9
PB
343 PageDesc *pd;
344 void **lp;
345 int i;
346
5cd2c5b6 347#if defined(CONFIG_USER_ONLY)
7267c094 348 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
349# define ALLOC(P, SIZE) \
350 do { \
351 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
352 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
353 } while (0)
354#else
355# define ALLOC(P, SIZE) \
7267c094 356 do { P = g_malloc0(SIZE); } while (0)
17e2377a 357#endif
434929bf 358
5cd2c5b6
RH
359 /* Level 1. Always allocated. */
360 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
361
362 /* Level 2..N-1. */
363 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
364 void **p = *lp;
365
366 if (p == NULL) {
367 if (!alloc) {
368 return NULL;
369 }
370 ALLOC(p, sizeof(void *) * L2_SIZE);
371 *lp = p;
17e2377a 372 }
5cd2c5b6
RH
373
374 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
375 }
376
377 pd = *lp;
378 if (pd == NULL) {
379 if (!alloc) {
380 return NULL;
381 }
382 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
383 *lp = pd;
54936004 384 }
5cd2c5b6
RH
385
386#undef ALLOC
5cd2c5b6
RH
387
388 return pd + (index & (L2_SIZE - 1));
54936004
FB
389}
390
41c1b1c9 391static inline PageDesc *page_find(tb_page_addr_t index)
54936004 392{
5cd2c5b6 393 return page_find_alloc(index, 0);
fd6ce8f6
FB
394}
395
6d9a1304 396#if !defined(CONFIG_USER_ONLY)
c227f099 397static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 398{
e3f4e2a4 399 PhysPageDesc *pd;
5cd2c5b6
RH
400 void **lp;
401 int i;
92e873b9 402
5cd2c5b6
RH
403 /* Level 1. Always allocated. */
404 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 405
5cd2c5b6
RH
406 /* Level 2..N-1. */
407 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
408 void **p = *lp;
409 if (p == NULL) {
410 if (!alloc) {
411 return NULL;
412 }
7267c094 413 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
414 }
415 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 416 }
5cd2c5b6 417
e3f4e2a4 418 pd = *lp;
5cd2c5b6 419 if (pd == NULL) {
e3f4e2a4 420 int i;
5cd2c5b6
RH
421
422 if (!alloc) {
108c49b8 423 return NULL;
5cd2c5b6
RH
424 }
425
7267c094 426 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 427
67c4d23c 428 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6
RH
429 pd[i].phys_offset = IO_MEM_UNASSIGNED;
430 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
67c4d23c 431 }
92e873b9 432 }
5cd2c5b6
RH
433
434 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
435}
436
c227f099 437static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 438{
108c49b8 439 return phys_page_find_alloc(index, 0);
92e873b9
FB
440}
441
c227f099
AL
442static void tlb_protect_code(ram_addr_t ram_addr);
443static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 444 target_ulong vaddr);
c8a706fe
PB
445#define mmap_lock() do { } while(0)
446#define mmap_unlock() do { } while(0)
9fa3e853 447#endif
fd6ce8f6 448
4369415f
FB
449#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
450
451#if defined(CONFIG_USER_ONLY)
ccbb4d44 452/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
453 user mode. It will change when a dedicated libc will be used */
454#define USE_STATIC_CODE_GEN_BUFFER
455#endif
456
457#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
458static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
460#endif
461
8fcd3692 462static void code_gen_alloc(unsigned long tb_size)
26a5f13b 463{
4369415f
FB
464#ifdef USE_STATIC_CODE_GEN_BUFFER
465 code_gen_buffer = static_code_gen_buffer;
466 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
467 map_exec(code_gen_buffer, code_gen_buffer_size);
468#else
26a5f13b
FB
469 code_gen_buffer_size = tb_size;
470 if (code_gen_buffer_size == 0) {
4369415f 471#if defined(CONFIG_USER_ONLY)
4369415f
FB
472 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
473#else
ccbb4d44 474 /* XXX: needs adjustments */
94a6b54f 475 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 476#endif
26a5f13b
FB
477 }
478 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
479 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
480 /* The code gen buffer location may have constraints depending on
481 the host cpu and OS */
482#if defined(__linux__)
483 {
484 int flags;
141ac468
BS
485 void *start = NULL;
486
26a5f13b
FB
487 flags = MAP_PRIVATE | MAP_ANONYMOUS;
488#if defined(__x86_64__)
489 flags |= MAP_32BIT;
490 /* Cannot map more than that */
491 if (code_gen_buffer_size > (800 * 1024 * 1024))
492 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
493#elif defined(__sparc_v9__)
494 // Map the buffer below 2G, so we can use direct calls and branches
495 flags |= MAP_FIXED;
496 start = (void *) 0x60000000UL;
497 if (code_gen_buffer_size > (512 * 1024 * 1024))
498 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 499#elif defined(__arm__)
63d41246 500 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
501 flags |= MAP_FIXED;
502 start = (void *) 0x01000000UL;
503 if (code_gen_buffer_size > 16 * 1024 * 1024)
504 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
505#elif defined(__s390x__)
506 /* Map the buffer so that we can use direct calls and branches. */
507 /* We have a +- 4GB range on the branches; leave some slop. */
508 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
509 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
510 }
511 start = (void *)0x90000000UL;
26a5f13b 512#endif
141ac468
BS
513 code_gen_buffer = mmap(start, code_gen_buffer_size,
514 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
515 flags, -1, 0);
516 if (code_gen_buffer == MAP_FAILED) {
517 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
518 exit(1);
519 }
520 }
cbb608a5 521#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
522 || defined(__DragonFly__) || defined(__OpenBSD__) \
523 || defined(__NetBSD__)
06e67a82
AL
524 {
525 int flags;
526 void *addr = NULL;
527 flags = MAP_PRIVATE | MAP_ANONYMOUS;
528#if defined(__x86_64__)
529 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
530 * 0x40000000 is free */
531 flags |= MAP_FIXED;
532 addr = (void *)0x40000000;
533 /* Cannot map more than that */
534 if (code_gen_buffer_size > (800 * 1024 * 1024))
535 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
536#elif defined(__sparc_v9__)
537 // Map the buffer below 2G, so we can use direct calls and branches
538 flags |= MAP_FIXED;
539 addr = (void *) 0x60000000UL;
540 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
541 code_gen_buffer_size = (512 * 1024 * 1024);
542 }
06e67a82
AL
543#endif
544 code_gen_buffer = mmap(addr, code_gen_buffer_size,
545 PROT_WRITE | PROT_READ | PROT_EXEC,
546 flags, -1, 0);
547 if (code_gen_buffer == MAP_FAILED) {
548 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
549 exit(1);
550 }
551 }
26a5f13b 552#else
7267c094 553 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
554 map_exec(code_gen_buffer, code_gen_buffer_size);
555#endif
4369415f 556#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 557 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
558 code_gen_buffer_max_size = code_gen_buffer_size -
559 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 560 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 561 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
562}
563
564/* Must be called before using the QEMU cpus. 'tb_size' is the size
565 (in bytes) allocated to the translation buffer. Zero means default
566 size. */
d5ab9713 567void tcg_exec_init(unsigned long tb_size)
26a5f13b 568{
26a5f13b
FB
569 cpu_gen_init();
570 code_gen_alloc(tb_size);
571 code_gen_ptr = code_gen_buffer;
4369415f 572 page_init();
9002ec79
RH
573#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
574 /* There's no guest base to take into account, so go ahead and
575 initialize the prologue now. */
576 tcg_prologue_init(&tcg_ctx);
577#endif
26a5f13b
FB
578}
579
d5ab9713
JK
580bool tcg_enabled(void)
581{
582 return code_gen_buffer != NULL;
583}
584
585void cpu_exec_init_all(void)
586{
587#if !defined(CONFIG_USER_ONLY)
588 memory_map_init();
589 io_mem_init();
590#endif
591}
592
9656f324
PB
593#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
594
e59fb374 595static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
596{
597 CPUState *env = opaque;
9656f324 598
3098dba0
AJ
599 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
600 version_id is increased. */
601 env->interrupt_request &= ~0x01;
9656f324
PB
602 tlb_flush(env, 1);
603
604 return 0;
605}
e7f4eff7
JQ
606
607static const VMStateDescription vmstate_cpu_common = {
608 .name = "cpu_common",
609 .version_id = 1,
610 .minimum_version_id = 1,
611 .minimum_version_id_old = 1,
e7f4eff7
JQ
612 .post_load = cpu_common_post_load,
613 .fields = (VMStateField []) {
614 VMSTATE_UINT32(halted, CPUState),
615 VMSTATE_UINT32(interrupt_request, CPUState),
616 VMSTATE_END_OF_LIST()
617 }
618};
9656f324
PB
619#endif
620
950f1472
GC
621CPUState *qemu_get_cpu(int cpu)
622{
623 CPUState *env = first_cpu;
624
625 while (env) {
626 if (env->cpu_index == cpu)
627 break;
628 env = env->next_cpu;
629 }
630
631 return env;
632}
633
6a00d601 634void cpu_exec_init(CPUState *env)
fd6ce8f6 635{
6a00d601
FB
636 CPUState **penv;
637 int cpu_index;
638
c2764719
PB
639#if defined(CONFIG_USER_ONLY)
640 cpu_list_lock();
641#endif
6a00d601
FB
642 env->next_cpu = NULL;
643 penv = &first_cpu;
644 cpu_index = 0;
645 while (*penv != NULL) {
1e9fa730 646 penv = &(*penv)->next_cpu;
6a00d601
FB
647 cpu_index++;
648 }
649 env->cpu_index = cpu_index;
268a362c 650 env->numa_node = 0;
72cf2d4f
BS
651 QTAILQ_INIT(&env->breakpoints);
652 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
653#ifndef CONFIG_USER_ONLY
654 env->thread_id = qemu_get_thread_id();
655#endif
6a00d601 656 *penv = env;
c2764719
PB
657#if defined(CONFIG_USER_ONLY)
658 cpu_list_unlock();
659#endif
b3c7724c 660#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
661 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
662 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
663 cpu_save, cpu_load, env);
664#endif
fd6ce8f6
FB
665}
666
d1a1eb74
TG
667/* Allocate a new translation block. Flush the translation buffer if
668 too many translation blocks or too much generated code. */
669static TranslationBlock *tb_alloc(target_ulong pc)
670{
671 TranslationBlock *tb;
672
673 if (nb_tbs >= code_gen_max_blocks ||
674 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
675 return NULL;
676 tb = &tbs[nb_tbs++];
677 tb->pc = pc;
678 tb->cflags = 0;
679 return tb;
680}
681
682void tb_free(TranslationBlock *tb)
683{
684 /* In practice this is mostly used for single use temporary TB
685 Ignore the hard cases and just back up if this TB happens to
686 be the last one generated. */
687 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
688 code_gen_ptr = tb->tc_ptr;
689 nb_tbs--;
690 }
691}
692
9fa3e853
FB
693static inline void invalidate_page_bitmap(PageDesc *p)
694{
695 if (p->code_bitmap) {
7267c094 696 g_free(p->code_bitmap);
9fa3e853
FB
697 p->code_bitmap = NULL;
698 }
699 p->code_write_count = 0;
700}
701
5cd2c5b6
RH
702/* Set to NULL all the 'first_tb' fields in all PageDescs. */
703
704static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 705{
5cd2c5b6 706 int i;
fd6ce8f6 707
5cd2c5b6
RH
708 if (*lp == NULL) {
709 return;
710 }
711 if (level == 0) {
712 PageDesc *pd = *lp;
7296abac 713 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
714 pd[i].first_tb = NULL;
715 invalidate_page_bitmap(pd + i);
fd6ce8f6 716 }
5cd2c5b6
RH
717 } else {
718 void **pp = *lp;
7296abac 719 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
720 page_flush_tb_1 (level - 1, pp + i);
721 }
722 }
723}
724
725static void page_flush_tb(void)
726{
727 int i;
728 for (i = 0; i < V_L1_SIZE; i++) {
729 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
730 }
731}
732
733/* flush all the translation blocks */
d4e8164f 734/* XXX: tb_flush is currently not thread safe */
6a00d601 735void tb_flush(CPUState *env1)
fd6ce8f6 736{
6a00d601 737 CPUState *env;
0124311e 738#if defined(DEBUG_FLUSH)
ab3d1727
BS
739 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
740 (unsigned long)(code_gen_ptr - code_gen_buffer),
741 nb_tbs, nb_tbs > 0 ?
742 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 743#endif
26a5f13b 744 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
745 cpu_abort(env1, "Internal error: code buffer overflow\n");
746
fd6ce8f6 747 nb_tbs = 0;
3b46e624 748
6a00d601
FB
749 for(env = first_cpu; env != NULL; env = env->next_cpu) {
750 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
751 }
9fa3e853 752
8a8a608f 753 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 754 page_flush_tb();
9fa3e853 755
fd6ce8f6 756 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
757 /* XXX: flush processor icache at this point if cache flush is
758 expensive */
e3db7226 759 tb_flush_count++;
fd6ce8f6
FB
760}
761
762#ifdef DEBUG_TB_CHECK
763
bc98a7ef 764static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
765{
766 TranslationBlock *tb;
767 int i;
768 address &= TARGET_PAGE_MASK;
99773bd4
PB
769 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
770 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
771 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
772 address >= tb->pc + tb->size)) {
0bf9e31a
BS
773 printf("ERROR invalidate: address=" TARGET_FMT_lx
774 " PC=%08lx size=%04x\n",
99773bd4 775 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
776 }
777 }
778 }
779}
780
781/* verify that all the pages have correct rights for code */
782static void tb_page_check(void)
783{
784 TranslationBlock *tb;
785 int i, flags1, flags2;
3b46e624 786
99773bd4
PB
787 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
788 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
789 flags1 = page_get_flags(tb->pc);
790 flags2 = page_get_flags(tb->pc + tb->size - 1);
791 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
792 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 793 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
794 }
795 }
796 }
797}
798
799#endif
800
801/* invalidate one TB */
802static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
803 int next_offset)
804{
805 TranslationBlock *tb1;
806 for(;;) {
807 tb1 = *ptb;
808 if (tb1 == tb) {
809 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
810 break;
811 }
812 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
813 }
814}
815
9fa3e853
FB
816static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
817{
818 TranslationBlock *tb1;
819 unsigned int n1;
820
821 for(;;) {
822 tb1 = *ptb;
823 n1 = (long)tb1 & 3;
824 tb1 = (TranslationBlock *)((long)tb1 & ~3);
825 if (tb1 == tb) {
826 *ptb = tb1->page_next[n1];
827 break;
828 }
829 ptb = &tb1->page_next[n1];
830 }
831}
832
d4e8164f
FB
833static inline void tb_jmp_remove(TranslationBlock *tb, int n)
834{
835 TranslationBlock *tb1, **ptb;
836 unsigned int n1;
837
838 ptb = &tb->jmp_next[n];
839 tb1 = *ptb;
840 if (tb1) {
841 /* find tb(n) in circular list */
842 for(;;) {
843 tb1 = *ptb;
844 n1 = (long)tb1 & 3;
845 tb1 = (TranslationBlock *)((long)tb1 & ~3);
846 if (n1 == n && tb1 == tb)
847 break;
848 if (n1 == 2) {
849 ptb = &tb1->jmp_first;
850 } else {
851 ptb = &tb1->jmp_next[n1];
852 }
853 }
854 /* now we can suppress tb(n) from the list */
855 *ptb = tb->jmp_next[n];
856
857 tb->jmp_next[n] = NULL;
858 }
859}
860
861/* reset the jump entry 'n' of a TB so that it is not chained to
862 another TB */
863static inline void tb_reset_jump(TranslationBlock *tb, int n)
864{
865 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
866}
867
41c1b1c9 868void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 869{
6a00d601 870 CPUState *env;
8a40a180 871 PageDesc *p;
d4e8164f 872 unsigned int h, n1;
41c1b1c9 873 tb_page_addr_t phys_pc;
8a40a180 874 TranslationBlock *tb1, *tb2;
3b46e624 875
8a40a180
FB
876 /* remove the TB from the hash list */
877 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
878 h = tb_phys_hash_func(phys_pc);
5fafdf24 879 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
880 offsetof(TranslationBlock, phys_hash_next));
881
882 /* remove the TB from the page list */
883 if (tb->page_addr[0] != page_addr) {
884 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
885 tb_page_remove(&p->first_tb, tb);
886 invalidate_page_bitmap(p);
887 }
888 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
889 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
890 tb_page_remove(&p->first_tb, tb);
891 invalidate_page_bitmap(p);
892 }
893
36bdbe54 894 tb_invalidated_flag = 1;
59817ccb 895
fd6ce8f6 896 /* remove the TB from the hash list */
8a40a180 897 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
898 for(env = first_cpu; env != NULL; env = env->next_cpu) {
899 if (env->tb_jmp_cache[h] == tb)
900 env->tb_jmp_cache[h] = NULL;
901 }
d4e8164f
FB
902
903 /* suppress this TB from the two jump lists */
904 tb_jmp_remove(tb, 0);
905 tb_jmp_remove(tb, 1);
906
907 /* suppress any remaining jumps to this TB */
908 tb1 = tb->jmp_first;
909 for(;;) {
910 n1 = (long)tb1 & 3;
911 if (n1 == 2)
912 break;
913 tb1 = (TranslationBlock *)((long)tb1 & ~3);
914 tb2 = tb1->jmp_next[n1];
915 tb_reset_jump(tb1, n1);
916 tb1->jmp_next[n1] = NULL;
917 tb1 = tb2;
918 }
919 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 920
e3db7226 921 tb_phys_invalidate_count++;
9fa3e853
FB
922}
923
924static inline void set_bits(uint8_t *tab, int start, int len)
925{
926 int end, mask, end1;
927
928 end = start + len;
929 tab += start >> 3;
930 mask = 0xff << (start & 7);
931 if ((start & ~7) == (end & ~7)) {
932 if (start < end) {
933 mask &= ~(0xff << (end & 7));
934 *tab |= mask;
935 }
936 } else {
937 *tab++ |= mask;
938 start = (start + 8) & ~7;
939 end1 = end & ~7;
940 while (start < end1) {
941 *tab++ = 0xff;
942 start += 8;
943 }
944 if (start < end) {
945 mask = ~(0xff << (end & 7));
946 *tab |= mask;
947 }
948 }
949}
950
951static void build_page_bitmap(PageDesc *p)
952{
953 int n, tb_start, tb_end;
954 TranslationBlock *tb;
3b46e624 955
7267c094 956 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
957
958 tb = p->first_tb;
959 while (tb != NULL) {
960 n = (long)tb & 3;
961 tb = (TranslationBlock *)((long)tb & ~3);
962 /* NOTE: this is subtle as a TB may span two physical pages */
963 if (n == 0) {
964 /* NOTE: tb_end may be after the end of the page, but
965 it is not a problem */
966 tb_start = tb->pc & ~TARGET_PAGE_MASK;
967 tb_end = tb_start + tb->size;
968 if (tb_end > TARGET_PAGE_SIZE)
969 tb_end = TARGET_PAGE_SIZE;
970 } else {
971 tb_start = 0;
972 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
973 }
974 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
975 tb = tb->page_next[n];
976 }
977}
978
2e70f6ef
PB
979TranslationBlock *tb_gen_code(CPUState *env,
980 target_ulong pc, target_ulong cs_base,
981 int flags, int cflags)
d720b93d
FB
982{
983 TranslationBlock *tb;
984 uint8_t *tc_ptr;
41c1b1c9
PB
985 tb_page_addr_t phys_pc, phys_page2;
986 target_ulong virt_page2;
d720b93d
FB
987 int code_gen_size;
988
41c1b1c9 989 phys_pc = get_page_addr_code(env, pc);
c27004ec 990 tb = tb_alloc(pc);
d720b93d
FB
991 if (!tb) {
992 /* flush must be done */
993 tb_flush(env);
994 /* cannot fail at this point */
c27004ec 995 tb = tb_alloc(pc);
2e70f6ef
PB
996 /* Don't forget to invalidate previous TB info. */
997 tb_invalidated_flag = 1;
d720b93d
FB
998 }
999 tc_ptr = code_gen_ptr;
1000 tb->tc_ptr = tc_ptr;
1001 tb->cs_base = cs_base;
1002 tb->flags = flags;
1003 tb->cflags = cflags;
d07bde88 1004 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1005 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1006
d720b93d 1007 /* check next page if needed */
c27004ec 1008 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1009 phys_page2 = -1;
c27004ec 1010 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1011 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1012 }
41c1b1c9 1013 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1014 return tb;
d720b93d 1015}
3b46e624 1016
9fa3e853
FB
1017/* invalidate all TBs which intersect with the target physical page
1018 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1019 the same physical page. 'is_cpu_write_access' should be true if called
1020 from a real cpu write access: the virtual CPU will exit the current
1021 TB if code is modified inside this TB. */
41c1b1c9 1022void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1023 int is_cpu_write_access)
1024{
6b917547 1025 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1026 CPUState *env = cpu_single_env;
41c1b1c9 1027 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1028 PageDesc *p;
1029 int n;
1030#ifdef TARGET_HAS_PRECISE_SMC
1031 int current_tb_not_found = is_cpu_write_access;
1032 TranslationBlock *current_tb = NULL;
1033 int current_tb_modified = 0;
1034 target_ulong current_pc = 0;
1035 target_ulong current_cs_base = 0;
1036 int current_flags = 0;
1037#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1038
1039 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1040 if (!p)
9fa3e853 1041 return;
5fafdf24 1042 if (!p->code_bitmap &&
d720b93d
FB
1043 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1044 is_cpu_write_access) {
9fa3e853
FB
1045 /* build code bitmap */
1046 build_page_bitmap(p);
1047 }
1048
1049 /* we remove all the TBs in the range [start, end[ */
1050 /* XXX: see if in some cases it could be faster to invalidate all the code */
1051 tb = p->first_tb;
1052 while (tb != NULL) {
1053 n = (long)tb & 3;
1054 tb = (TranslationBlock *)((long)tb & ~3);
1055 tb_next = tb->page_next[n];
1056 /* NOTE: this is subtle as a TB may span two physical pages */
1057 if (n == 0) {
1058 /* NOTE: tb_end may be after the end of the page, but
1059 it is not a problem */
1060 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1061 tb_end = tb_start + tb->size;
1062 } else {
1063 tb_start = tb->page_addr[1];
1064 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1065 }
1066 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1067#ifdef TARGET_HAS_PRECISE_SMC
1068 if (current_tb_not_found) {
1069 current_tb_not_found = 0;
1070 current_tb = NULL;
2e70f6ef 1071 if (env->mem_io_pc) {
d720b93d 1072 /* now we have a real cpu fault */
2e70f6ef 1073 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1074 }
1075 }
1076 if (current_tb == tb &&
2e70f6ef 1077 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1078 /* If we are modifying the current TB, we must stop
1079 its execution. We could be more precise by checking
1080 that the modification is after the current PC, but it
1081 would require a specialized function to partially
1082 restore the CPU state */
3b46e624 1083
d720b93d 1084 current_tb_modified = 1;
618ba8e6 1085 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1086 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1087 &current_flags);
d720b93d
FB
1088 }
1089#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1090 /* we need to do that to handle the case where a signal
1091 occurs while doing tb_phys_invalidate() */
1092 saved_tb = NULL;
1093 if (env) {
1094 saved_tb = env->current_tb;
1095 env->current_tb = NULL;
1096 }
9fa3e853 1097 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1098 if (env) {
1099 env->current_tb = saved_tb;
1100 if (env->interrupt_request && env->current_tb)
1101 cpu_interrupt(env, env->interrupt_request);
1102 }
9fa3e853
FB
1103 }
1104 tb = tb_next;
1105 }
1106#if !defined(CONFIG_USER_ONLY)
1107 /* if no code remaining, no need to continue to use slow writes */
1108 if (!p->first_tb) {
1109 invalidate_page_bitmap(p);
d720b93d 1110 if (is_cpu_write_access) {
2e70f6ef 1111 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1112 }
1113 }
1114#endif
1115#ifdef TARGET_HAS_PRECISE_SMC
1116 if (current_tb_modified) {
1117 /* we generate a block containing just the instruction
1118 modifying the memory. It will ensure that it cannot modify
1119 itself */
ea1c1802 1120 env->current_tb = NULL;
2e70f6ef 1121 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1122 cpu_resume_from_signal(env, NULL);
9fa3e853 1123 }
fd6ce8f6 1124#endif
9fa3e853 1125}
fd6ce8f6 1126
9fa3e853 1127/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1128static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1129{
1130 PageDesc *p;
1131 int offset, b;
59817ccb 1132#if 0
a4193c8a 1133 if (1) {
93fcfe39
AL
1134 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1135 cpu_single_env->mem_io_vaddr, len,
1136 cpu_single_env->eip,
1137 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1138 }
1139#endif
9fa3e853 1140 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1141 if (!p)
9fa3e853
FB
1142 return;
1143 if (p->code_bitmap) {
1144 offset = start & ~TARGET_PAGE_MASK;
1145 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1146 if (b & ((1 << len) - 1))
1147 goto do_invalidate;
1148 } else {
1149 do_invalidate:
d720b93d 1150 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1151 }
1152}
1153
9fa3e853 1154#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1155static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1156 unsigned long pc, void *puc)
9fa3e853 1157{
6b917547 1158 TranslationBlock *tb;
9fa3e853 1159 PageDesc *p;
6b917547 1160 int n;
d720b93d 1161#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1162 TranslationBlock *current_tb = NULL;
d720b93d 1163 CPUState *env = cpu_single_env;
6b917547
AL
1164 int current_tb_modified = 0;
1165 target_ulong current_pc = 0;
1166 target_ulong current_cs_base = 0;
1167 int current_flags = 0;
d720b93d 1168#endif
9fa3e853
FB
1169
1170 addr &= TARGET_PAGE_MASK;
1171 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1172 if (!p)
9fa3e853
FB
1173 return;
1174 tb = p->first_tb;
d720b93d
FB
1175#ifdef TARGET_HAS_PRECISE_SMC
1176 if (tb && pc != 0) {
1177 current_tb = tb_find_pc(pc);
1178 }
1179#endif
9fa3e853
FB
1180 while (tb != NULL) {
1181 n = (long)tb & 3;
1182 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1183#ifdef TARGET_HAS_PRECISE_SMC
1184 if (current_tb == tb &&
2e70f6ef 1185 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1186 /* If we are modifying the current TB, we must stop
1187 its execution. We could be more precise by checking
1188 that the modification is after the current PC, but it
1189 would require a specialized function to partially
1190 restore the CPU state */
3b46e624 1191
d720b93d 1192 current_tb_modified = 1;
618ba8e6 1193 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1194 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1195 &current_flags);
d720b93d
FB
1196 }
1197#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1198 tb_phys_invalidate(tb, addr);
1199 tb = tb->page_next[n];
1200 }
fd6ce8f6 1201 p->first_tb = NULL;
d720b93d
FB
1202#ifdef TARGET_HAS_PRECISE_SMC
1203 if (current_tb_modified) {
1204 /* we generate a block containing just the instruction
1205 modifying the memory. It will ensure that it cannot modify
1206 itself */
ea1c1802 1207 env->current_tb = NULL;
2e70f6ef 1208 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1209 cpu_resume_from_signal(env, puc);
1210 }
1211#endif
fd6ce8f6 1212}
9fa3e853 1213#endif
fd6ce8f6
FB
1214
1215/* add the tb in the target page and protect it if necessary */
5fafdf24 1216static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1217 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1218{
1219 PageDesc *p;
4429ab44
JQ
1220#ifndef CONFIG_USER_ONLY
1221 bool page_already_protected;
1222#endif
9fa3e853
FB
1223
1224 tb->page_addr[n] = page_addr;
5cd2c5b6 1225 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1226 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1227#ifndef CONFIG_USER_ONLY
1228 page_already_protected = p->first_tb != NULL;
1229#endif
9fa3e853
FB
1230 p->first_tb = (TranslationBlock *)((long)tb | n);
1231 invalidate_page_bitmap(p);
fd6ce8f6 1232
107db443 1233#if defined(TARGET_HAS_SMC) || 1
d720b93d 1234
9fa3e853 1235#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1236 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1237 target_ulong addr;
1238 PageDesc *p2;
9fa3e853
FB
1239 int prot;
1240
fd6ce8f6
FB
1241 /* force the host page as non writable (writes will have a
1242 page fault + mprotect overhead) */
53a5960a 1243 page_addr &= qemu_host_page_mask;
fd6ce8f6 1244 prot = 0;
53a5960a
PB
1245 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1246 addr += TARGET_PAGE_SIZE) {
1247
1248 p2 = page_find (addr >> TARGET_PAGE_BITS);
1249 if (!p2)
1250 continue;
1251 prot |= p2->flags;
1252 p2->flags &= ~PAGE_WRITE;
53a5960a 1253 }
5fafdf24 1254 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1255 (prot & PAGE_BITS) & ~PAGE_WRITE);
1256#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1257 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1258 page_addr);
fd6ce8f6 1259#endif
fd6ce8f6 1260 }
9fa3e853
FB
1261#else
1262 /* if some code is already present, then the pages are already
1263 protected. So we handle the case where only the first TB is
1264 allocated in a physical page */
4429ab44 1265 if (!page_already_protected) {
6a00d601 1266 tlb_protect_code(page_addr);
9fa3e853
FB
1267 }
1268#endif
d720b93d
FB
1269
1270#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1271}
1272
9fa3e853
FB
1273/* add a new TB and link it to the physical page tables. phys_page2 is
1274 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1275void tb_link_page(TranslationBlock *tb,
1276 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1277{
9fa3e853
FB
1278 unsigned int h;
1279 TranslationBlock **ptb;
1280
c8a706fe
PB
1281 /* Grab the mmap lock to stop another thread invalidating this TB
1282 before we are done. */
1283 mmap_lock();
9fa3e853
FB
1284 /* add in the physical hash table */
1285 h = tb_phys_hash_func(phys_pc);
1286 ptb = &tb_phys_hash[h];
1287 tb->phys_hash_next = *ptb;
1288 *ptb = tb;
fd6ce8f6
FB
1289
1290 /* add in the page list */
9fa3e853
FB
1291 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1292 if (phys_page2 != -1)
1293 tb_alloc_page(tb, 1, phys_page2);
1294 else
1295 tb->page_addr[1] = -1;
9fa3e853 1296
d4e8164f
FB
1297 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1298 tb->jmp_next[0] = NULL;
1299 tb->jmp_next[1] = NULL;
1300
1301 /* init original jump addresses */
1302 if (tb->tb_next_offset[0] != 0xffff)
1303 tb_reset_jump(tb, 0);
1304 if (tb->tb_next_offset[1] != 0xffff)
1305 tb_reset_jump(tb, 1);
8a40a180
FB
1306
1307#ifdef DEBUG_TB_CHECK
1308 tb_page_check();
1309#endif
c8a706fe 1310 mmap_unlock();
fd6ce8f6
FB
1311}
1312
9fa3e853
FB
1313/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1314 tb[1].tc_ptr. Return NULL if not found */
1315TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1316{
9fa3e853
FB
1317 int m_min, m_max, m;
1318 unsigned long v;
1319 TranslationBlock *tb;
a513fe19
FB
1320
1321 if (nb_tbs <= 0)
1322 return NULL;
1323 if (tc_ptr < (unsigned long)code_gen_buffer ||
1324 tc_ptr >= (unsigned long)code_gen_ptr)
1325 return NULL;
1326 /* binary search (cf Knuth) */
1327 m_min = 0;
1328 m_max = nb_tbs - 1;
1329 while (m_min <= m_max) {
1330 m = (m_min + m_max) >> 1;
1331 tb = &tbs[m];
1332 v = (unsigned long)tb->tc_ptr;
1333 if (v == tc_ptr)
1334 return tb;
1335 else if (tc_ptr < v) {
1336 m_max = m - 1;
1337 } else {
1338 m_min = m + 1;
1339 }
5fafdf24 1340 }
a513fe19
FB
1341 return &tbs[m_max];
1342}
7501267e 1343
ea041c0e
FB
1344static void tb_reset_jump_recursive(TranslationBlock *tb);
1345
1346static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1347{
1348 TranslationBlock *tb1, *tb_next, **ptb;
1349 unsigned int n1;
1350
1351 tb1 = tb->jmp_next[n];
1352 if (tb1 != NULL) {
1353 /* find head of list */
1354 for(;;) {
1355 n1 = (long)tb1 & 3;
1356 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1357 if (n1 == 2)
1358 break;
1359 tb1 = tb1->jmp_next[n1];
1360 }
1361 /* we are now sure now that tb jumps to tb1 */
1362 tb_next = tb1;
1363
1364 /* remove tb from the jmp_first list */
1365 ptb = &tb_next->jmp_first;
1366 for(;;) {
1367 tb1 = *ptb;
1368 n1 = (long)tb1 & 3;
1369 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1370 if (n1 == n && tb1 == tb)
1371 break;
1372 ptb = &tb1->jmp_next[n1];
1373 }
1374 *ptb = tb->jmp_next[n];
1375 tb->jmp_next[n] = NULL;
3b46e624 1376
ea041c0e
FB
1377 /* suppress the jump to next tb in generated code */
1378 tb_reset_jump(tb, n);
1379
0124311e 1380 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1381 tb_reset_jump_recursive(tb_next);
1382 }
1383}
1384
1385static void tb_reset_jump_recursive(TranslationBlock *tb)
1386{
1387 tb_reset_jump_recursive2(tb, 0);
1388 tb_reset_jump_recursive2(tb, 1);
1389}
1390
1fddef4b 1391#if defined(TARGET_HAS_ICE)
94df27fd
PB
1392#if defined(CONFIG_USER_ONLY)
1393static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394{
1395 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1396}
1397#else
d720b93d
FB
1398static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1399{
c227f099 1400 target_phys_addr_t addr;
9b3c35e0 1401 target_ulong pd;
c227f099 1402 ram_addr_t ram_addr;
c2f07f81 1403 PhysPageDesc *p;
d720b93d 1404
c2f07f81
PB
1405 addr = cpu_get_phys_page_debug(env, pc);
1406 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1407 if (!p) {
1408 pd = IO_MEM_UNASSIGNED;
1409 } else {
1410 pd = p->phys_offset;
1411 }
1412 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1413 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1414}
c27004ec 1415#endif
94df27fd 1416#endif /* TARGET_HAS_ICE */
d720b93d 1417
c527ee8f
PB
1418#if defined(CONFIG_USER_ONLY)
1419void cpu_watchpoint_remove_all(CPUState *env, int mask)
1420
1421{
1422}
1423
1424int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1425 int flags, CPUWatchpoint **watchpoint)
1426{
1427 return -ENOSYS;
1428}
1429#else
6658ffb8 1430/* Add a watchpoint. */
a1d1bb31
AL
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1433{
b4051334 1434 target_ulong len_mask = ~(len - 1);
c0ce998e 1435 CPUWatchpoint *wp;
6658ffb8 1436
b4051334
AL
1437 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1438 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1439 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1440 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1441 return -EINVAL;
1442 }
7267c094 1443 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1444
1445 wp->vaddr = addr;
b4051334 1446 wp->len_mask = len_mask;
a1d1bb31
AL
1447 wp->flags = flags;
1448
2dc9f411 1449 /* keep all GDB-injected watchpoints in front */
c0ce998e 1450 if (flags & BP_GDB)
72cf2d4f 1451 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1452 else
72cf2d4f 1453 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1454
6658ffb8 1455 tlb_flush_page(env, addr);
a1d1bb31
AL
1456
1457 if (watchpoint)
1458 *watchpoint = wp;
1459 return 0;
6658ffb8
PB
1460}
1461
a1d1bb31
AL
1462/* Remove a specific watchpoint. */
1463int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1464 int flags)
6658ffb8 1465{
b4051334 1466 target_ulong len_mask = ~(len - 1);
a1d1bb31 1467 CPUWatchpoint *wp;
6658ffb8 1468
72cf2d4f 1469 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1470 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1471 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1472 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1473 return 0;
1474 }
1475 }
a1d1bb31 1476 return -ENOENT;
6658ffb8
PB
1477}
1478
a1d1bb31
AL
1479/* Remove a specific watchpoint by reference. */
1480void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1481{
72cf2d4f 1482 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1483
a1d1bb31
AL
1484 tlb_flush_page(env, watchpoint->vaddr);
1485
7267c094 1486 g_free(watchpoint);
a1d1bb31
AL
1487}
1488
1489/* Remove all matching watchpoints. */
1490void cpu_watchpoint_remove_all(CPUState *env, int mask)
1491{
c0ce998e 1492 CPUWatchpoint *wp, *next;
a1d1bb31 1493
72cf2d4f 1494 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1495 if (wp->flags & mask)
1496 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1497 }
7d03f82f 1498}
c527ee8f 1499#endif
7d03f82f 1500
a1d1bb31
AL
1501/* Add a breakpoint. */
1502int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1503 CPUBreakpoint **breakpoint)
4c3a88a2 1504{
1fddef4b 1505#if defined(TARGET_HAS_ICE)
c0ce998e 1506 CPUBreakpoint *bp;
3b46e624 1507
7267c094 1508 bp = g_malloc(sizeof(*bp));
4c3a88a2 1509
a1d1bb31
AL
1510 bp->pc = pc;
1511 bp->flags = flags;
1512
2dc9f411 1513 /* keep all GDB-injected breakpoints in front */
c0ce998e 1514 if (flags & BP_GDB)
72cf2d4f 1515 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1516 else
72cf2d4f 1517 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1518
d720b93d 1519 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1520
1521 if (breakpoint)
1522 *breakpoint = bp;
4c3a88a2
FB
1523 return 0;
1524#else
a1d1bb31 1525 return -ENOSYS;
4c3a88a2
FB
1526#endif
1527}
1528
a1d1bb31
AL
1529/* Remove a specific breakpoint. */
1530int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1531{
7d03f82f 1532#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1533 CPUBreakpoint *bp;
1534
72cf2d4f 1535 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1536 if (bp->pc == pc && bp->flags == flags) {
1537 cpu_breakpoint_remove_by_ref(env, bp);
1538 return 0;
1539 }
7d03f82f 1540 }
a1d1bb31
AL
1541 return -ENOENT;
1542#else
1543 return -ENOSYS;
7d03f82f
EI
1544#endif
1545}
1546
a1d1bb31
AL
1547/* Remove a specific breakpoint by reference. */
1548void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1549{
1fddef4b 1550#if defined(TARGET_HAS_ICE)
72cf2d4f 1551 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1552
a1d1bb31
AL
1553 breakpoint_invalidate(env, breakpoint->pc);
1554
7267c094 1555 g_free(breakpoint);
a1d1bb31
AL
1556#endif
1557}
1558
1559/* Remove all matching breakpoints. */
1560void cpu_breakpoint_remove_all(CPUState *env, int mask)
1561{
1562#if defined(TARGET_HAS_ICE)
c0ce998e 1563 CPUBreakpoint *bp, *next;
a1d1bb31 1564
72cf2d4f 1565 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1566 if (bp->flags & mask)
1567 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1568 }
4c3a88a2
FB
1569#endif
1570}
1571
c33a346e
FB
1572/* enable or disable single step mode. EXCP_DEBUG is returned by the
1573 CPU loop after each instruction */
1574void cpu_single_step(CPUState *env, int enabled)
1575{
1fddef4b 1576#if defined(TARGET_HAS_ICE)
c33a346e
FB
1577 if (env->singlestep_enabled != enabled) {
1578 env->singlestep_enabled = enabled;
e22a25c9
AL
1579 if (kvm_enabled())
1580 kvm_update_guest_debug(env, 0);
1581 else {
ccbb4d44 1582 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1583 /* XXX: only flush what is necessary */
1584 tb_flush(env);
1585 }
c33a346e
FB
1586 }
1587#endif
1588}
1589
34865134
FB
1590/* enable or disable low levels log */
1591void cpu_set_log(int log_flags)
1592{
1593 loglevel = log_flags;
1594 if (loglevel && !logfile) {
11fcfab4 1595 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1596 if (!logfile) {
1597 perror(logfilename);
1598 _exit(1);
1599 }
9fa3e853
FB
1600#if !defined(CONFIG_SOFTMMU)
1601 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1602 {
b55266b5 1603 static char logfile_buf[4096];
9fa3e853
FB
1604 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1605 }
bf65f53f
FN
1606#elif !defined(_WIN32)
1607 /* Win32 doesn't support line-buffering and requires size >= 2 */
34865134 1608 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1609#endif
e735b91c
PB
1610 log_append = 1;
1611 }
1612 if (!loglevel && logfile) {
1613 fclose(logfile);
1614 logfile = NULL;
34865134
FB
1615 }
1616}
1617
1618void cpu_set_log_filename(const char *filename)
1619{
1620 logfilename = strdup(filename);
e735b91c
PB
1621 if (logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
1624 }
1625 cpu_set_log(loglevel);
34865134 1626}
c33a346e 1627
3098dba0 1628static void cpu_unlink_tb(CPUState *env)
ea041c0e 1629{
3098dba0
AJ
1630 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1631 problem and hope the cpu will stop of its own accord. For userspace
1632 emulation this often isn't actually as bad as it sounds. Often
1633 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1634 TranslationBlock *tb;
c227f099 1635 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1636
cab1b4bd 1637 spin_lock(&interrupt_lock);
3098dba0
AJ
1638 tb = env->current_tb;
1639 /* if the cpu is currently executing code, we must unlink it and
1640 all the potentially executing TB */
f76cfe56 1641 if (tb) {
3098dba0
AJ
1642 env->current_tb = NULL;
1643 tb_reset_jump_recursive(tb);
be214e6c 1644 }
cab1b4bd 1645 spin_unlock(&interrupt_lock);
3098dba0
AJ
1646}
1647
97ffbd8d 1648#ifndef CONFIG_USER_ONLY
3098dba0 1649/* mask must never be zero, except for A20 change call */
ec6959d0 1650static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1651{
1652 int old_mask;
be214e6c 1653
2e70f6ef 1654 old_mask = env->interrupt_request;
68a79315 1655 env->interrupt_request |= mask;
3098dba0 1656
8edac960
AL
1657 /*
1658 * If called from iothread context, wake the target cpu in
1659 * case its halted.
1660 */
b7680cb6 1661 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1662 qemu_cpu_kick(env);
1663 return;
1664 }
8edac960 1665
2e70f6ef 1666 if (use_icount) {
266910c4 1667 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1668 if (!can_do_io(env)
be214e6c 1669 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1670 cpu_abort(env, "Raised interrupt while not in I/O function");
1671 }
2e70f6ef 1672 } else {
3098dba0 1673 cpu_unlink_tb(env);
ea041c0e
FB
1674 }
1675}
1676
ec6959d0
JK
1677CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1678
97ffbd8d
JK
1679#else /* CONFIG_USER_ONLY */
1680
1681void cpu_interrupt(CPUState *env, int mask)
1682{
1683 env->interrupt_request |= mask;
1684 cpu_unlink_tb(env);
1685}
1686#endif /* CONFIG_USER_ONLY */
1687
b54ad049
FB
1688void cpu_reset_interrupt(CPUState *env, int mask)
1689{
1690 env->interrupt_request &= ~mask;
1691}
1692
3098dba0
AJ
1693void cpu_exit(CPUState *env)
1694{
1695 env->exit_request = 1;
1696 cpu_unlink_tb(env);
1697}
1698
c7cd6a37 1699const CPULogItem cpu_log_items[] = {
5fafdf24 1700 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1701 "show generated host assembly code for each compiled TB" },
1702 { CPU_LOG_TB_IN_ASM, "in_asm",
1703 "show target assembly code for each compiled TB" },
5fafdf24 1704 { CPU_LOG_TB_OP, "op",
57fec1fe 1705 "show micro ops for each compiled TB" },
f193c797 1706 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1707 "show micro ops "
1708#ifdef TARGET_I386
1709 "before eflags optimization and "
f193c797 1710#endif
e01a1157 1711 "after liveness analysis" },
f193c797
FB
1712 { CPU_LOG_INT, "int",
1713 "show interrupts/exceptions in short format" },
1714 { CPU_LOG_EXEC, "exec",
1715 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1716 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1717 "show CPU state before block translation" },
f193c797
FB
1718#ifdef TARGET_I386
1719 { CPU_LOG_PCALL, "pcall",
1720 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1721 { CPU_LOG_RESET, "cpu_reset",
1722 "show CPU state before CPU resets" },
f193c797 1723#endif
8e3a9fd2 1724#ifdef DEBUG_IOPORT
fd872598
FB
1725 { CPU_LOG_IOPORT, "ioport",
1726 "show all i/o ports accesses" },
8e3a9fd2 1727#endif
f193c797
FB
1728 { 0, NULL, NULL },
1729};
1730
f6f3fbca
MT
1731#ifndef CONFIG_USER_ONLY
1732static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1733 = QLIST_HEAD_INITIALIZER(memory_client_list);
1734
1735static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1736 ram_addr_t size,
0fd542fb
MT
1737 ram_addr_t phys_offset,
1738 bool log_dirty)
f6f3fbca
MT
1739{
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1742 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1743 }
1744}
1745
1746static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1747 target_phys_addr_t end)
f6f3fbca
MT
1748{
1749 CPUPhysMemoryClient *client;
1750 QLIST_FOREACH(client, &memory_client_list, list) {
1751 int r = client->sync_dirty_bitmap(client, start, end);
1752 if (r < 0)
1753 return r;
1754 }
1755 return 0;
1756}
1757
1758static int cpu_notify_migration_log(int enable)
1759{
1760 CPUPhysMemoryClient *client;
1761 QLIST_FOREACH(client, &memory_client_list, list) {
1762 int r = client->migration_log(client, enable);
1763 if (r < 0)
1764 return r;
1765 }
1766 return 0;
1767}
1768
2173a75f
AW
1769struct last_map {
1770 target_phys_addr_t start_addr;
1771 ram_addr_t size;
1772 ram_addr_t phys_offset;
1773};
1774
8d4c78e7
AW
1775/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1776 * address. Each intermediate table provides the next L2_BITs of guest
1777 * physical address space. The number of levels varies based on host and
1778 * guest configuration, making it efficient to build the final guest
1779 * physical address by seeding the L1 offset and shifting and adding in
1780 * each L2 offset as we recurse through them. */
2173a75f
AW
1781static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1782 void **lp, target_phys_addr_t addr,
1783 struct last_map *map)
f6f3fbca 1784{
5cd2c5b6 1785 int i;
f6f3fbca 1786
5cd2c5b6
RH
1787 if (*lp == NULL) {
1788 return;
1789 }
1790 if (level == 0) {
1791 PhysPageDesc *pd = *lp;
8d4c78e7 1792 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1793 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1794 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1795 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1796
1797 if (map->size &&
1798 start_addr == map->start_addr + map->size &&
1799 pd[i].phys_offset == map->phys_offset + map->size) {
1800
1801 map->size += TARGET_PAGE_SIZE;
1802 continue;
1803 } else if (map->size) {
1804 client->set_memory(client, map->start_addr,
1805 map->size, map->phys_offset, false);
1806 }
1807
1808 map->start_addr = start_addr;
1809 map->size = TARGET_PAGE_SIZE;
1810 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1811 }
5cd2c5b6
RH
1812 }
1813 } else {
1814 void **pp = *lp;
7296abac 1815 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1816 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1817 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1818 }
1819 }
1820}
1821
1822static void phys_page_for_each(CPUPhysMemoryClient *client)
1823{
5cd2c5b6 1824 int i;
2173a75f
AW
1825 struct last_map map = { };
1826
5cd2c5b6
RH
1827 for (i = 0; i < P_L1_SIZE; ++i) {
1828 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1829 l1_phys_map + i, i, &map);
1830 }
1831 if (map.size) {
1832 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1833 false);
f6f3fbca 1834 }
f6f3fbca
MT
1835}
1836
1837void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1838{
1839 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1840 phys_page_for_each(client);
1841}
1842
1843void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1844{
1845 QLIST_REMOVE(client, list);
1846}
1847#endif
1848
f193c797
FB
1849static int cmp1(const char *s1, int n, const char *s2)
1850{
1851 if (strlen(s2) != n)
1852 return 0;
1853 return memcmp(s1, s2, n) == 0;
1854}
3b46e624 1855
f193c797
FB
1856/* takes a comma separated list of log masks. Return 0 if error. */
1857int cpu_str_to_log_mask(const char *str)
1858{
c7cd6a37 1859 const CPULogItem *item;
f193c797
FB
1860 int mask;
1861 const char *p, *p1;
1862
1863 p = str;
1864 mask = 0;
1865 for(;;) {
1866 p1 = strchr(p, ',');
1867 if (!p1)
1868 p1 = p + strlen(p);
9742bf26
YT
1869 if(cmp1(p,p1-p,"all")) {
1870 for(item = cpu_log_items; item->mask != 0; item++) {
1871 mask |= item->mask;
1872 }
1873 } else {
1874 for(item = cpu_log_items; item->mask != 0; item++) {
1875 if (cmp1(p, p1 - p, item->name))
1876 goto found;
1877 }
1878 return 0;
f193c797 1879 }
f193c797
FB
1880 found:
1881 mask |= item->mask;
1882 if (*p1 != ',')
1883 break;
1884 p = p1 + 1;
1885 }
1886 return mask;
1887}
ea041c0e 1888
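/* Editor's sketch (not in the original source): the -d command line option
 * is the usual consumer of this parser, roughly:
 *
 *     int mask = cpu_str_to_log_mask("in_asm,cpu");
 *     if (!mask) {
 *         fprintf(stderr, "unknown log item\n");
 *     } else {
 *         cpu_set_log(mask);
 *     }
 *
 * "all" selects every entry of cpu_log_items; a return value of 0 means the
 * string contained an unrecognised name. */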
7501267e
FB
1889void cpu_abort(CPUState *env, const char *fmt, ...)
1890{
1891 va_list ap;
493ae1f0 1892 va_list ap2;
7501267e
FB
1893
1894 va_start(ap, fmt);
493ae1f0 1895 va_copy(ap2, ap);
7501267e
FB
1896 fprintf(stderr, "qemu: fatal: ");
1897 vfprintf(stderr, fmt, ap);
1898 fprintf(stderr, "\n");
1899#ifdef TARGET_I386
7fe48483
FB
1900 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1901#else
1902 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1903#endif
93fcfe39
AL
1904 if (qemu_log_enabled()) {
1905 qemu_log("qemu: fatal: ");
1906 qemu_log_vprintf(fmt, ap2);
1907 qemu_log("\n");
f9373291 1908#ifdef TARGET_I386
93fcfe39 1909 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1910#else
93fcfe39 1911 log_cpu_state(env, 0);
f9373291 1912#endif
31b1a7b4 1913 qemu_log_flush();
93fcfe39 1914 qemu_log_close();
924edcae 1915 }
493ae1f0 1916 va_end(ap2);
f9373291 1917 va_end(ap);
fd052bf6
RV
1918#if defined(CONFIG_USER_ONLY)
1919 {
1920 struct sigaction act;
1921 sigfillset(&act.sa_mask);
1922 act.sa_handler = SIG_DFL;
1923 sigaction(SIGABRT, &act, NULL);
1924 }
1925#endif
7501267e
FB
1926 abort();
1927}
1928
c5be9f08
TS
1929CPUState *cpu_copy(CPUState *env)
1930{
01ba9816 1931 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1932 CPUState *next_cpu = new_env->next_cpu;
1933 int cpu_index = new_env->cpu_index;
5a38f081
AL
1934#if defined(TARGET_HAS_ICE)
1935 CPUBreakpoint *bp;
1936 CPUWatchpoint *wp;
1937#endif
1938
c5be9f08 1939 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1940
1941 /* Preserve chaining and index. */
c5be9f08
TS
1942 new_env->next_cpu = next_cpu;
1943 new_env->cpu_index = cpu_index;
5a38f081
AL
1944
1945 /* Clone all break/watchpoints.
1946 Note: Once we support ptrace with hw-debug register access, make sure
1947 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1948 QTAILQ_INIT(&env->breakpoints);
1949 QTAILQ_INIT(&env->watchpoints);
5a38f081 1950#if defined(TARGET_HAS_ICE)
72cf2d4f 1951 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1952 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1953 }
72cf2d4f 1954 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1955 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1956 wp->flags, NULL);
1957 }
1958#endif
1959
c5be9f08
TS
1960 return new_env;
1961}
1962
0124311e
FB
1963#if !defined(CONFIG_USER_ONLY)
1964
5c751e99
EI
1965static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1966{
1967 unsigned int i;
1968
1969 /* Discard jump cache entries for any tb which might potentially
1970 overlap the flushed page. */
1971 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1972 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1973 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1974
1975 i = tb_jmp_cache_hash_page(addr);
1976 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1977 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1978}
1979
08738984
IK
1980static CPUTLBEntry s_cputlb_empty_entry = {
1981 .addr_read = -1,
1982 .addr_write = -1,
1983 .addr_code = -1,
1984 .addend = -1,
1985};
1986
ee8b7021
FB
1987/* NOTE: if flush_global is true, also flush global entries (not
1988 implemented yet) */
1989void tlb_flush(CPUState *env, int flush_global)
33417e70 1990{
33417e70 1991 int i;
0124311e 1992
9fa3e853
FB
1993#if defined(DEBUG_TLB)
1994 printf("tlb_flush:\n");
1995#endif
0124311e
FB
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
1999
33417e70 2000 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
2001 int mmu_idx;
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 2003 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 2004 }
33417e70 2005 }
9fa3e853 2006
8a40a180 2007 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 2008
d4c430a8
PB
2009 env->tlb_flush_addr = -1;
2010 env->tlb_flush_mask = 0;
e3db7226 2011 tlb_flush_count++;
33417e70
FB
2012}
2013
274da6b2 2014static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2015{
5fafdf24 2016 if (addr == (tlb_entry->addr_read &
84b7b8e7 2017 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2018 addr == (tlb_entry->addr_write &
84b7b8e7 2019 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2020 addr == (tlb_entry->addr_code &
84b7b8e7 2021 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2022 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2023 }
61382a50
FB
2024}
2025
2e12669a 2026void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2027{
8a40a180 2028 int i;
cfde4bd9 2029 int mmu_idx;
0124311e 2030
9fa3e853 2031#if defined(DEBUG_TLB)
108c49b8 2032 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2033#endif
d4c430a8
PB
2034 /* Check if we need to flush due to large pages. */
2035 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2036#if defined(DEBUG_TLB)
2037 printf("tlb_flush_page: forced full flush ("
2038 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2039 env->tlb_flush_addr, env->tlb_flush_mask);
2040#endif
2041 tlb_flush(env, 1);
2042 return;
2043 }
0124311e
FB
2044 /* must reset current TB so that interrupts cannot modify the
2045 links while we are modifying them */
2046 env->current_tb = NULL;
61382a50
FB
2047
2048 addr &= TARGET_PAGE_MASK;
2049 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2052
5c751e99 2053 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2054}
2055
9fa3e853
FB
2056/* update the TLBs so that writes to code in the virtual page 'addr'
2057 can be detected */
c227f099 2058static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2059{
5fafdf24 2060 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2061 ram_addr + TARGET_PAGE_SIZE,
2062 CODE_DIRTY_FLAG);
9fa3e853
FB
2063}
2064
9fa3e853 2065/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2066 tested for self-modifying code */
c227f099 2067static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2068 target_ulong vaddr)
9fa3e853 2069{
f7c11b53 2070 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2071}
2072
5fafdf24 2073static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2074 unsigned long start, unsigned long length)
2075{
2076 unsigned long addr;
84b7b8e7
FB
2077 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2078 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2079 if ((addr - start) < length) {
0f459d16 2080 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2081 }
2082 }
2083}
2084
5579c7f3 2085/* Note: start and end must be within the same ram block. */
c227f099 2086void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2087 int dirty_flags)
1ccde1cb
FB
2088{
2089 CPUState *env;
4f2ac237 2090 unsigned long length, start1;
f7c11b53 2091 int i;
1ccde1cb
FB
2092
2093 start &= TARGET_PAGE_MASK;
2094 end = TARGET_PAGE_ALIGN(end);
2095
2096 length = end - start;
2097 if (length == 0)
2098 return;
f7c11b53 2099 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2100
1ccde1cb
FB
2101 /* we modify the TLB cache so that the dirty bit will be set again
2102 when accessing the range */
b2e0a138 2103 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2104 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2105 address comparisons below. */
b2e0a138 2106 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2107 != (end - 1) - start) {
2108 abort();
2109 }
2110
6a00d601 2111 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2112 int mmu_idx;
2113 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2116 start1, length);
2117 }
6a00d601 2118 }
1ccde1cb
FB
2119}
2120
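/* Editor's sketch (not in the original source): a display device that
 * registered its VRAM as RAM can poll and clear the dirty bitmap like this
 * (redraw_page() is a hypothetical helper):
 *
 *     if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
 *         redraw_page(page_addr);
 *     }
 *     cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG);
 *
 * Resetting the bits also rewrites the TLB entries above, so the next guest
 * write to the range sets the dirty flags again. */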
74576198
AL
2121int cpu_physical_memory_set_dirty_tracking(int enable)
2122{
f6f3fbca 2123 int ret = 0;
74576198 2124 in_migration = enable;
f6f3fbca
MT
2125 ret = cpu_notify_migration_log(!!enable);
2126 return ret;
74576198
AL
2127}
2128
2129int cpu_physical_memory_get_dirty_tracking(void)
2130{
2131 return in_migration;
2132}
2133
c227f099
AL
2134int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2135 target_phys_addr_t end_addr)
2bec46dc 2136{
7b8f3b78 2137 int ret;
151f7749 2138
f6f3fbca 2139 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2140 return ret;
2bec46dc
AL
2141}
2142
e5896b12
AP
2143int cpu_physical_log_start(target_phys_addr_t start_addr,
2144 ram_addr_t size)
2145{
2146 CPUPhysMemoryClient *client;
2147 QLIST_FOREACH(client, &memory_client_list, list) {
2148 if (client->log_start) {
2149 int r = client->log_start(client, start_addr, size);
2150 if (r < 0) {
2151 return r;
2152 }
2153 }
2154 }
2155 return 0;
2156}
2157
2158int cpu_physical_log_stop(target_phys_addr_t start_addr,
2159 ram_addr_t size)
2160{
2161 CPUPhysMemoryClient *client;
2162 QLIST_FOREACH(client, &memory_client_list, list) {
2163 if (client->log_stop) {
2164 int r = client->log_stop(client, start_addr, size);
2165 if (r < 0) {
2166 return r;
2167 }
2168 }
2169 }
2170 return 0;
2171}
2172
3a7d929e
FB
2173static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2174{
c227f099 2175 ram_addr_t ram_addr;
5579c7f3 2176 void *p;
3a7d929e 2177
84b7b8e7 2178 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2179 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2180 + tlb_entry->addend);
e890261f 2181 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2182 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2183 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2184 }
2185 }
2186}
2187
2188/* update the TLB according to the current state of the dirty bits */
2189void cpu_tlb_update_dirty(CPUState *env)
2190{
2191 int i;
cfde4bd9
IY
2192 int mmu_idx;
2193 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2194 for(i = 0; i < CPU_TLB_SIZE; i++)
2195 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2196 }
3a7d929e
FB
2197}
2198
0f459d16 2199static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2200{
0f459d16
PB
2201 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2202 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2203}
2204
0f459d16
PB
2205/* update the TLB corresponding to virtual page vaddr
2206 so that it is no longer dirty */
2207static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2208{
1ccde1cb 2209 int i;
cfde4bd9 2210 int mmu_idx;
1ccde1cb 2211
0f459d16 2212 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2213 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2214 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2215 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2216}
2217
d4c430a8
PB
2218/* Our TLB does not support large pages, so remember the area covered by
2219 large pages and trigger a full TLB flush if these are invalidated. */
2220static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2221 target_ulong size)
2222{
2223 target_ulong mask = ~(size - 1);
2224
2225 if (env->tlb_flush_addr == (target_ulong)-1) {
2226 env->tlb_flush_addr = vaddr & mask;
2227 env->tlb_flush_mask = mask;
2228 return;
2229 }
2230 /* Extend the existing region to include the new page.
2231 This is a compromise between unnecessary flushes and the cost
2232 of maintaining a full variable size TLB. */
2233 mask &= env->tlb_flush_mask;
2234 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2235 mask <<= 1;
2236 }
2237 env->tlb_flush_addr &= mask;
2238 env->tlb_flush_mask = mask;
2239}
2240
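/* Worked example (editor's note, not in the original source): with 4 KiB
 * target pages, inserting a 2 MiB page at 0x00200000 records
 * tlb_flush_addr = 0x00200000 and tlb_flush_mask = 0xffe00000.  Inserting a
 * second 2 MiB page at 0x00600000 widens the mask to 0xff800000, i.e. a
 * single 8 MiB region [0x00000000, 0x007fffff]; any tlb_flush_page() that
 * falls inside it degrades to a full tlb_flush(). */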
2241/* Add a new TLB entry. At most one entry for a given virtual address
2242 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2243 supplied size is only used by tlb_flush_page. */
2244void tlb_set_page(CPUState *env, target_ulong vaddr,
2245 target_phys_addr_t paddr, int prot,
2246 int mmu_idx, target_ulong size)
9fa3e853 2247{
92e873b9 2248 PhysPageDesc *p;
4f2ac237 2249 unsigned long pd;
9fa3e853 2250 unsigned int index;
4f2ac237 2251 target_ulong address;
0f459d16 2252 target_ulong code_address;
355b1943 2253 unsigned long addend;
84b7b8e7 2254 CPUTLBEntry *te;
a1d1bb31 2255 CPUWatchpoint *wp;
c227f099 2256 target_phys_addr_t iotlb;
9fa3e853 2257
d4c430a8
PB
2258 assert(size >= TARGET_PAGE_SIZE);
2259 if (size != TARGET_PAGE_SIZE) {
2260 tlb_add_large_page(env, vaddr, size);
2261 }
92e873b9 2262 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2263 if (!p) {
2264 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2265 } else {
2266 pd = p->phys_offset;
9fa3e853
FB
2267 }
2268#if defined(DEBUG_TLB)
7fd3f494
SW
2269 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2270 " prot=%x idx=%d pd=0x%08lx\n",
2271 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2272#endif
2273
0f459d16
PB
2274 address = vaddr;
2275 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2276 /* IO memory case (romd handled later) */
2277 address |= TLB_MMIO;
2278 }
5579c7f3 2279 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2281 /* Normal RAM. */
2282 iotlb = pd & TARGET_PAGE_MASK;
2283 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2284 iotlb |= IO_MEM_NOTDIRTY;
2285 else
2286 iotlb |= IO_MEM_ROM;
2287 } else {
ccbb4d44 2288 /* IO handlers are currently passed a physical address.
0f459d16
PB
2289 It would be nice to pass an offset from the base address
2290 of that region. This would avoid having to special case RAM,
2291 and avoid full address decoding in every device.
2292 We can't use the high bits of pd for this because
2293 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2294 iotlb = (pd & ~TARGET_PAGE_MASK);
2295 if (p) {
8da3ff18
PB
2296 iotlb += p->region_offset;
2297 } else {
2298 iotlb += paddr;
2299 }
0f459d16
PB
2300 }
2301
2302 code_address = address;
2303 /* Make accesses to pages with watchpoints go via the
2304 watchpoint trap routines. */
72cf2d4f 2305 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2306 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2307 /* Avoid trapping reads of pages with a write breakpoint. */
2308 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2309 iotlb = io_mem_watch + paddr;
2310 address |= TLB_MMIO;
2311 break;
2312 }
6658ffb8 2313 }
0f459d16 2314 }
d79acba4 2315
0f459d16
PB
2316 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2317 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2318 te = &env->tlb_table[mmu_idx][index];
2319 te->addend = addend - vaddr;
2320 if (prot & PAGE_READ) {
2321 te->addr_read = address;
2322 } else {
2323 te->addr_read = -1;
2324 }
5c751e99 2325
0f459d16
PB
2326 if (prot & PAGE_EXEC) {
2327 te->addr_code = code_address;
2328 } else {
2329 te->addr_code = -1;
2330 }
2331 if (prot & PAGE_WRITE) {
2332 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2333 (pd & IO_MEM_ROMD)) {
2334 /* Write access calls the I/O callback. */
2335 te->addr_write = address | TLB_MMIO;
2336 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2337 !cpu_physical_memory_is_dirty(pd)) {
2338 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2339 } else {
0f459d16 2340 te->addr_write = address;
9fa3e853 2341 }
0f459d16
PB
2342 } else {
2343 te->addr_write = -1;
9fa3e853 2344 }
9fa3e853
FB
2345}
2346
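/* Editor's sketch (not in the original source): the usual caller is a
 * target's MMU fault path (tlb_fill), which maps one page after a TLB miss,
 * roughly:
 *
 *     tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
 *                  PAGE_READ | PAGE_WRITE | PAGE_EXEC,
 *                  mmu_idx, TARGET_PAGE_SIZE);
 *
 * MMIO and watchpointed pages get TLB_MMIO so accesses take the softmmu
 * slow path; clean RAM pages get TLB_NOTDIRTY so the first write is routed
 * through the notdirty handlers further down in this file. */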
0124311e
FB
2347#else
2348
ee8b7021 2349void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2350{
2351}
2352
2e12669a 2353void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2354{
2355}
2356
edf8e2af
MW
2357/*
2358 * Walks guest process memory "regions" one by one
2359 * and calls callback function 'fn' for each region.
2360 */
5cd2c5b6
RH
2361
2362struct walk_memory_regions_data
2363{
2364 walk_memory_regions_fn fn;
2365 void *priv;
2366 unsigned long start;
2367 int prot;
2368};
2369
2370static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2371 abi_ulong end, int new_prot)
5cd2c5b6
RH
2372{
2373 if (data->start != -1ul) {
2374 int rc = data->fn(data->priv, data->start, end, data->prot);
2375 if (rc != 0) {
2376 return rc;
2377 }
2378 }
2379
2380 data->start = (new_prot ? end : -1ul);
2381 data->prot = new_prot;
2382
2383 return 0;
2384}
2385
2386static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2387 abi_ulong base, int level, void **lp)
5cd2c5b6 2388{
b480d9b7 2389 abi_ulong pa;
5cd2c5b6
RH
2390 int i, rc;
2391
2392 if (*lp == NULL) {
2393 return walk_memory_regions_end(data, base, 0);
2394 }
2395
2396 if (level == 0) {
2397 PageDesc *pd = *lp;
7296abac 2398 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2399 int prot = pd[i].flags;
2400
2401 pa = base | (i << TARGET_PAGE_BITS);
2402 if (prot != data->prot) {
2403 rc = walk_memory_regions_end(data, pa, prot);
2404 if (rc != 0) {
2405 return rc;
9fa3e853 2406 }
9fa3e853 2407 }
5cd2c5b6
RH
2408 }
2409 } else {
2410 void **pp = *lp;
7296abac 2411 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2412 pa = base | ((abi_ulong)i <<
2413 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2414 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2415 if (rc != 0) {
2416 return rc;
2417 }
2418 }
2419 }
2420
2421 return 0;
2422}
2423
2424int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2425{
2426 struct walk_memory_regions_data data;
2427 unsigned long i;
2428
2429 data.fn = fn;
2430 data.priv = priv;
2431 data.start = -1ul;
2432 data.prot = 0;
2433
2434 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2435 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2436 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2437 if (rc != 0) {
2438 return rc;
9fa3e853 2439 }
33417e70 2440 }
5cd2c5b6
RH
2441
2442 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2443}
2444
b480d9b7
PB
2445static int dump_region(void *priv, abi_ulong start,
2446 abi_ulong end, unsigned long prot)
edf8e2af
MW
2447{
2448 FILE *f = (FILE *)priv;
2449
b480d9b7
PB
2450 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2451 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2452 start, end, end - start,
2453 ((prot & PAGE_READ) ? 'r' : '-'),
2454 ((prot & PAGE_WRITE) ? 'w' : '-'),
2455 ((prot & PAGE_EXEC) ? 'x' : '-'));
2456
2457 return (0);
2458}
2459
2460/* dump memory mappings */
2461void page_dump(FILE *f)
2462{
2463 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2464 "start", "end", "size", "prot");
2465 walk_memory_regions(f, dump_region);
33417e70
FB
2466}
2467
53a5960a 2468int page_get_flags(target_ulong address)
33417e70 2469{
9fa3e853
FB
2470 PageDesc *p;
2471
2472 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2473 if (!p)
9fa3e853
FB
2474 return 0;
2475 return p->flags;
2476}
2477
376a7909
RH
2478/* Modify the flags of a page and invalidate the code if necessary.
2479 The flag PAGE_WRITE_ORG is positioned automatically depending
2480 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2481void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2482{
376a7909
RH
2483 target_ulong addr, len;
2484
2485 /* This function should never be called with addresses outside the
2486 guest address space. If this assert fires, it probably indicates
2487 a missing call to h2g_valid. */
b480d9b7
PB
2488#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2489 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2490#endif
2491 assert(start < end);
9fa3e853
FB
2492
2493 start = start & TARGET_PAGE_MASK;
2494 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2495
2496 if (flags & PAGE_WRITE) {
9fa3e853 2497 flags |= PAGE_WRITE_ORG;
376a7909
RH
2498 }
2499
2500 for (addr = start, len = end - start;
2501 len != 0;
2502 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2503 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2504
2505 /* If the write protection bit is set, then we invalidate
2506 the code inside. */
5fafdf24 2507 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2508 (flags & PAGE_WRITE) &&
2509 p->first_tb) {
d720b93d 2510 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2511 }
2512 p->flags = flags;
2513 }
33417e70
FB
2514}
2515
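/* Editor's sketch (not in the original source): the user-mode mmap/mprotect
 * emulation is the typical caller, e.g. after a successful target mapping:
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *
 * A page that currently holds translated code and newly gains PAGE_WRITE
 * has its TBs invalidated here before the flags are updated. */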
3d97b40b
TS
2516int page_check_range(target_ulong start, target_ulong len, int flags)
2517{
2518 PageDesc *p;
2519 target_ulong end;
2520 target_ulong addr;
2521
376a7909
RH
2522 /* This function should never be called with addresses outside the
2523 guest address space. If this assert fires, it probably indicates
2524 a missing call to h2g_valid. */
338e9e6c
BS
2525#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2526 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2527#endif
2528
3e0650a9
RH
2529 if (len == 0) {
2530 return 0;
2531 }
376a7909
RH
2532 if (start + len - 1 < start) {
2533 /* We've wrapped around. */
55f280c9 2534 return -1;
376a7909 2535 }
55f280c9 2536
3d97b40b
TS
2537 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2538 start = start & TARGET_PAGE_MASK;
2539
376a7909
RH
2540 for (addr = start, len = end - start;
2541 len != 0;
2542 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2543 p = page_find(addr >> TARGET_PAGE_BITS);
2544 if( !p )
2545 return -1;
2546 if( !(p->flags & PAGE_VALID) )
2547 return -1;
2548
dae3270c 2549 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2550 return -1;
dae3270c
FB
2551 if (flags & PAGE_WRITE) {
2552 if (!(p->flags & PAGE_WRITE_ORG))
2553 return -1;
2554 /* unprotect the page if it was put read-only because it
2555 contains translated code */
2556 if (!(p->flags & PAGE_WRITE)) {
2557 if (!page_unprotect(addr, 0, NULL))
2558 return -1;
2559 }
2560 return 0;
2561 }
3d97b40b
TS
2562 }
2563 return 0;
2564}
2565
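/* Editor's sketch (not in the original source): syscall emulation typically
 * performs an access_ok()-style check with this before touching guest
 * memory, e.g.
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
 *         return -TARGET_EFAULT;        // error path as used by linux-user
 *     }
 *
 * For PAGE_WRITE the check also transparently unprotects pages that were
 * made read-only to guard translated code. */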
9fa3e853 2566/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2567 page. Return TRUE if the fault was successfully handled. */
53a5960a 2568int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2569{
45d679d6
AJ
2570 unsigned int prot;
2571 PageDesc *p;
53a5960a 2572 target_ulong host_start, host_end, addr;
9fa3e853 2573
c8a706fe
PB
2574 /* Technically this isn't safe inside a signal handler. However we
2575 know this only ever happens in a synchronous SEGV handler, so in
2576 practice it seems to be ok. */
2577 mmap_lock();
2578
45d679d6
AJ
2579 p = page_find(address >> TARGET_PAGE_BITS);
2580 if (!p) {
c8a706fe 2581 mmap_unlock();
9fa3e853 2582 return 0;
c8a706fe 2583 }
45d679d6 2584
9fa3e853
FB
2585 /* if the page was really writable, then we change its
2586 protection back to writable */
45d679d6
AJ
2587 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2588 host_start = address & qemu_host_page_mask;
2589 host_end = host_start + qemu_host_page_size;
2590
2591 prot = 0;
2592 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2593 p = page_find(addr >> TARGET_PAGE_BITS);
2594 p->flags |= PAGE_WRITE;
2595 prot |= p->flags;
2596
9fa3e853
FB
2597 /* and since the content will be modified, we must invalidate
2598 the corresponding translated code. */
45d679d6 2599 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2600#ifdef DEBUG_TB_CHECK
45d679d6 2601 tb_invalidate_check(addr);
9fa3e853 2602#endif
9fa3e853 2603 }
45d679d6
AJ
2604 mprotect((void *)g2h(host_start), qemu_host_page_size,
2605 prot & PAGE_BITS);
2606
2607 mmap_unlock();
2608 return 1;
9fa3e853 2609 }
c8a706fe 2610 mmap_unlock();
9fa3e853
FB
2611 return 0;
2612}
2613
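/* Editor's note (not in the original source): this is the write-fault half
 * of the self-modifying-code protection.  Pages containing translated code
 * are mprotect()ed read-only; when the guest writes to one, the host SEGV
 * handler ends up here, the affected translations are dropped, PAGE_WRITE
 * is restored for the whole host page, and the faulting instruction is
 * restarted. */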
6a00d601
FB
2614static inline void tlb_set_dirty(CPUState *env,
2615 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2616{
2617}
9fa3e853
FB
2618#endif /* defined(CONFIG_USER_ONLY) */
2619
e2eef170 2620#if !defined(CONFIG_USER_ONLY)
8da3ff18 2621
c04b2b78
PB
2622#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2623typedef struct subpage_t {
2624 target_phys_addr_t base;
f6405247
RH
2625 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2626 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2627} subpage_t;
2628
c227f099
AL
2629static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2630 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2631static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2632 ram_addr_t orig_memory,
2633 ram_addr_t region_offset);
db7b5426
BS
2634#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2635 need_subpage) \
2636 do { \
2637 if (addr > start_addr) \
2638 start_addr2 = 0; \
2639 else { \
2640 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2641 if (start_addr2 > 0) \
2642 need_subpage = 1; \
2643 } \
2644 \
49e9fba2 2645 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2646 end_addr2 = TARGET_PAGE_SIZE - 1; \
2647 else { \
2648 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2649 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2650 need_subpage = 1; \
2651 } \
2652 } while (0)
2653
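/* Worked example (editor's note, not in the original source): with 4 KiB
 * target pages, registering a region of size 0x100 at
 * start_addr = 0x1000200 evaluates, for addr == start_addr:
 *     start_addr2 = 0x200, end_addr2 = 0x2ff, need_subpage = 1
 * i.e. the region covers only part of its page, so a subpage_t is installed
 * to dispatch per-offset accesses to the right io_mem handlers. */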
8f2498f9
MT
2654/* register physical memory.
2655 For RAM, 'size' must be a multiple of the target page size.
2656 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2657 io memory page. The address used when calling the IO function is
2658 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2659 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2660 before calculating this offset. This should not be a problem unless
2661 the low bits of start_addr and region_offset differ. */
0fd542fb 2662void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2663 ram_addr_t size,
2664 ram_addr_t phys_offset,
0fd542fb
MT
2665 ram_addr_t region_offset,
2666 bool log_dirty)
33417e70 2667{
c227f099 2668 target_phys_addr_t addr, end_addr;
92e873b9 2669 PhysPageDesc *p;
9d42037b 2670 CPUState *env;
c227f099 2671 ram_addr_t orig_size = size;
f6405247 2672 subpage_t *subpage;
33417e70 2673
3b8e6a2d 2674 assert(size);
0fd542fb 2675 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2676
67c4d23c
PB
2677 if (phys_offset == IO_MEM_UNASSIGNED) {
2678 region_offset = start_addr;
2679 }
8da3ff18 2680 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2681 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2682 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2683
2684 addr = start_addr;
2685 do {
db7b5426
BS
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2688 ram_addr_t orig_memory = p->phys_offset;
2689 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2690 int need_subpage = 0;
2691
2692 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2693 need_subpage);
f6405247 2694 if (need_subpage) {
db7b5426
BS
2695 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2696 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2697 &p->phys_offset, orig_memory,
2698 p->region_offset);
db7b5426
BS
2699 } else {
2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2701 >> IO_MEM_SHIFT];
2702 }
8da3ff18
PB
2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2704 region_offset);
2705 p->region_offset = 0;
db7b5426
BS
2706 } else {
2707 p->phys_offset = phys_offset;
2708 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2709 (phys_offset & IO_MEM_ROMD))
2710 phys_offset += TARGET_PAGE_SIZE;
2711 }
2712 } else {
2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2714 p->phys_offset = phys_offset;
8da3ff18 2715 p->region_offset = region_offset;
db7b5426 2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2717 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2718 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2719 } else {
c227f099 2720 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2721 int need_subpage = 0;
2722
2723 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2724 end_addr2, need_subpage);
2725
f6405247 2726 if (need_subpage) {
db7b5426 2727 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2728 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2729 addr & TARGET_PAGE_MASK);
db7b5426 2730 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2731 phys_offset, region_offset);
2732 p->region_offset = 0;
db7b5426
BS
2733 }
2734 }
2735 }
8da3ff18 2736 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2737 addr += TARGET_PAGE_SIZE;
2738 } while (addr != end_addr);
3b46e624 2739
9d42037b
FB
2740 /* since each CPU stores ram addresses in its TLB cache, we must
2741 reset the modified entries */
2742 /* XXX: slow ! */
2743 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2744 tlb_flush(env, 1);
2745 }
33417e70
FB
2746}
2747
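/* Editor's sketch (not in the original source, names are placeholders):
 * boards and devices of this vintage reached this function through the
 * cpu_register_physical_memory() wrapper, roughly:
 *
 *     ram_addr_t ram = qemu_ram_alloc(NULL, "board.ram", ram_size);
 *     cpu_register_physical_memory(0x00000000, ram_size, ram | IO_MEM_RAM);
 *
 *     int io = cpu_register_io_memory(my_mmio_read, my_mmio_write, opaque,
 *                                     DEVICE_NATIVE_ENDIAN);
 *     cpu_register_physical_memory(0x10000000, 0x1000, io);
 *
 * The MemoryRegion API in memory.c was in the process of replacing these
 * calls when this code was current. */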
ba863458 2748/* XXX: temporary until new memory mapping API */
c227f099 2749ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2750{
2751 PhysPageDesc *p;
2752
2753 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754 if (!p)
2755 return IO_MEM_UNASSIGNED;
2756 return p->phys_offset;
2757}
2758
c227f099 2759void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2760{
2761 if (kvm_enabled())
2762 kvm_coalesce_mmio_region(addr, size);
2763}
2764
c227f099 2765void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2766{
2767 if (kvm_enabled())
2768 kvm_uncoalesce_mmio_region(addr, size);
2769}
2770
62a2744c
SY
2771void qemu_flush_coalesced_mmio_buffer(void)
2772{
2773 if (kvm_enabled())
2774 kvm_flush_coalesced_mmio_buffer();
2775}
2776
c902760f
MT
2777#if defined(__linux__) && !defined(TARGET_S390X)
2778
2779#include <sys/vfs.h>
2780
2781#define HUGETLBFS_MAGIC 0x958458f6
2782
2783static long gethugepagesize(const char *path)
2784{
2785 struct statfs fs;
2786 int ret;
2787
2788 do {
9742bf26 2789 ret = statfs(path, &fs);
c902760f
MT
2790 } while (ret != 0 && errno == EINTR);
2791
2792 if (ret != 0) {
9742bf26
YT
2793 perror(path);
2794 return 0;
c902760f
MT
2795 }
2796
2797 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2798 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2799
2800 return fs.f_bsize;
2801}
2802
04b16653
AW
2803static void *file_ram_alloc(RAMBlock *block,
2804 ram_addr_t memory,
2805 const char *path)
c902760f
MT
2806{
2807 char *filename;
2808 void *area;
2809 int fd;
2810#ifdef MAP_POPULATE
2811 int flags;
2812#endif
2813 unsigned long hpagesize;
2814
2815 hpagesize = gethugepagesize(path);
2816 if (!hpagesize) {
9742bf26 2817 return NULL;
c902760f
MT
2818 }
2819
2820 if (memory < hpagesize) {
2821 return NULL;
2822 }
2823
2824 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2825 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2826 return NULL;
2827 }
2828
2829 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2830 return NULL;
c902760f
MT
2831 }
2832
2833 fd = mkstemp(filename);
2834 if (fd < 0) {
9742bf26
YT
2835 perror("unable to create backing store for hugepages");
2836 free(filename);
2837 return NULL;
c902760f
MT
2838 }
2839 unlink(filename);
2840 free(filename);
2841
2842 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2843
2844 /*
2845 * ftruncate is not supported by hugetlbfs in older
2846 * hosts, so don't bother bailing out on errors.
2847 * If anything goes wrong with it under other filesystems,
2848 * mmap will fail.
2849 */
2850 if (ftruncate(fd, memory))
9742bf26 2851 perror("ftruncate");
c902760f
MT
2852
2853#ifdef MAP_POPULATE
2854 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2855 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2856 * to sidestep this quirk.
2857 */
2858 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2859 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2860#else
2861 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2862#endif
2863 if (area == MAP_FAILED) {
9742bf26
YT
2864 perror("file_ram_alloc: can't mmap RAM pages");
2865 close(fd);
2866 return (NULL);
c902760f 2867 }
04b16653 2868 block->fd = fd;
c902760f
MT
2869 return area;
2870}
2871#endif
2872
d17b5288 2873static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2874{
2875 RAMBlock *block, *next_block;
f15fbc4b 2876 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
04b16653
AW
2877
2878 if (QLIST_EMPTY(&ram_list.blocks))
2879 return 0;
2880
2881 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2882 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2883
2884 end = block->offset + block->length;
2885
2886 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2887 if (next_block->offset >= end) {
2888 next = MIN(next, next_block->offset);
2889 }
2890 }
2891 if (next - end >= size && next - end < mingap) {
2892 offset = end;
2893 mingap = next - end;
2894 }
2895 }
2896 return offset;
2897}
2898
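/* Worked example (editor's note, not in the original source): with existing
 * blocks covering [0, 0x8000000) and [0xc000000, 0x10000000), a request for
 * 0x2000000 bytes sees two candidate gaps: 0x4000000 bytes starting at
 * 0x8000000, and the unbounded space above 0x10000000.  The smallest gap
 * that still fits wins, so the new block lands at offset 0x8000000; this
 * best-fit policy keeps the ram_addr_t space compact. */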
2899static ram_addr_t last_ram_offset(void)
d17b5288
AW
2900{
2901 RAMBlock *block;
2902 ram_addr_t last = 0;
2903
2904 QLIST_FOREACH(block, &ram_list.blocks, next)
2905 last = MAX(last, block->offset + block->length);
2906
2907 return last;
2908}
2909
84b89d78 2910ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2911 ram_addr_t size, void *host)
84b89d78
CM
2912{
2913 RAMBlock *new_block, *block;
2914
2915 size = TARGET_PAGE_ALIGN(size);
7267c094 2916 new_block = g_malloc0(sizeof(*new_block));
84b89d78
CM
2917
2918 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2919 char *id = dev->parent_bus->info->get_dev_path(dev);
2920 if (id) {
2921 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2922 g_free(id);
84b89d78
CM
2923 }
2924 }
2925 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2926
2927 QLIST_FOREACH(block, &ram_list.blocks, next) {
2928 if (!strcmp(block->idstr, new_block->idstr)) {
2929 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2930 new_block->idstr);
2931 abort();
2932 }
2933 }
2934
432d268c 2935 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2936 if (host) {
2937 new_block->host = host;
cd19cfa2 2938 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2939 } else {
2940 if (mem_path) {
c902760f 2941#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2942 new_block->host = file_ram_alloc(new_block, size, mem_path);
2943 if (!new_block->host) {
2944 new_block->host = qemu_vmalloc(size);
e78815a5 2945 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2946 }
c902760f 2947#else
6977dfe6
YT
2948 fprintf(stderr, "-mem-path option unsupported\n");
2949 exit(1);
c902760f 2950#endif
6977dfe6 2951 } else {
6b02494d 2952#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2953 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2954 a system-defined value, which is at least 256GB. Larger systems
2955 have larger values. We put the guest between the end of the data
2956 segment (system break) and this value. We use 32GB as a base to
2957 have enough room for the system break to grow. */
2958 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2959 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2960 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2961 if (new_block->host == MAP_FAILED) {
2962 fprintf(stderr, "Allocating RAM failed\n");
2963 abort();
2964 }
6b02494d 2965#else
868bb33f 2966 if (xen_enabled()) {
432d268c
JN
2967 xen_ram_alloc(new_block->offset, size);
2968 } else {
2969 new_block->host = qemu_vmalloc(size);
2970 }
6b02494d 2971#endif
e78815a5 2972 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2973 }
c902760f 2974 }
94a6b54f
PB
2975 new_block->length = size;
2976
f471a17e 2977 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2978
7267c094 2979 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2980 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2981 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2982 0xff, size >> TARGET_PAGE_BITS);
2983
6f0437e8
JK
2984 if (kvm_enabled())
2985 kvm_setup_guest_memory(new_block->host, size);
2986
94a6b54f
PB
2987 return new_block->offset;
2988}
e9a1ab19 2989
6977dfe6
YT
2990ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2991{
2992 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2993}
2994
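/* Editor's sketch (not in the original source, names are placeholders):
 * device models allocate their RAM-backed storage here and register the
 * returned offset as physical memory, e.g.
 *
 *     ram_addr_t vram = qemu_ram_alloc(dev, "vga.vram", vram_size);
 *     uint8_t *host = qemu_get_ram_ptr(vram);
 *
 * Passing a DeviceState prefixes the block's idstr with the device's bus
 * path, and that idstr is what RAM migration uses to match blocks on the
 * destination side. */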
1f2e98b6
AW
2995void qemu_ram_free_from_ptr(ram_addr_t addr)
2996{
2997 RAMBlock *block;
2998
2999 QLIST_FOREACH(block, &ram_list.blocks, next) {
3000 if (addr == block->offset) {
3001 QLIST_REMOVE(block, next);
7267c094 3002 g_free(block);
1f2e98b6
AW
3003 return;
3004 }
3005 }
3006}
3007
c227f099 3008void qemu_ram_free(ram_addr_t addr)
e9a1ab19 3009{
04b16653
AW
3010 RAMBlock *block;
3011
3012 QLIST_FOREACH(block, &ram_list.blocks, next) {
3013 if (addr == block->offset) {
3014 QLIST_REMOVE(block, next);
cd19cfa2
HY
3015 if (block->flags & RAM_PREALLOC_MASK) {
3016 ;
3017 } else if (mem_path) {
04b16653
AW
3018#if defined (__linux__) && !defined(TARGET_S390X)
3019 if (block->fd) {
3020 munmap(block->host, block->length);
3021 close(block->fd);
3022 } else {
3023 qemu_vfree(block->host);
3024 }
fd28aa13
JK
3025#else
3026 abort();
04b16653
AW
3027#endif
3028 } else {
3029#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3030 munmap(block->host, block->length);
3031#else
868bb33f 3032 if (xen_enabled()) {
e41d7c69 3033 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3034 } else {
3035 qemu_vfree(block->host);
3036 }
04b16653
AW
3037#endif
3038 }
7267c094 3039 g_free(block);
04b16653
AW
3040 return;
3041 }
3042 }
3043
e9a1ab19
FB
3044}
3045
cd19cfa2
HY
3046#ifndef _WIN32
3047void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3048{
3049 RAMBlock *block;
3050 ram_addr_t offset;
3051 int flags;
3052 void *area, *vaddr;
3053
3054 QLIST_FOREACH(block, &ram_list.blocks, next) {
3055 offset = addr - block->offset;
3056 if (offset < block->length) {
3057 vaddr = block->host + offset;
3058 if (block->flags & RAM_PREALLOC_MASK) {
3059 ;
3060 } else {
3061 flags = MAP_FIXED;
3062 munmap(vaddr, length);
3063 if (mem_path) {
3064#if defined(__linux__) && !defined(TARGET_S390X)
3065 if (block->fd) {
3066#ifdef MAP_POPULATE
3067 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3068 MAP_PRIVATE;
3069#else
3070 flags |= MAP_PRIVATE;
3071#endif
3072 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3073 flags, block->fd, offset);
3074 } else {
3075 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3076 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3077 flags, -1, 0);
3078 }
fd28aa13
JK
3079#else
3080 abort();
cd19cfa2
HY
3081#endif
3082 } else {
3083#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3084 flags |= MAP_SHARED | MAP_ANONYMOUS;
3085 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3086 flags, -1, 0);
3087#else
3088 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3089 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3090 flags, -1, 0);
3091#endif
3092 }
3093 if (area != vaddr) {
f15fbc4b
AP
3094 fprintf(stderr, "Could not remap addr: "
3095 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3096 length, addr);
3097 exit(1);
3098 }
3099 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3100 }
3101 return;
3102 }
3103 }
3104}
3105#endif /* !_WIN32 */
3106
dc828ca1 3107/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3108 With the exception of the softmmu code in this file, this should
3109 only be used for local memory (e.g. video ram) that the device owns,
3110 and knows it isn't going to access beyond the end of the block.
3111
3112 It should not be used for general purpose DMA.
3113 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3114 */
c227f099 3115void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3116{
94a6b54f
PB
3117 RAMBlock *block;
3118
f471a17e
AW
3119 QLIST_FOREACH(block, &ram_list.blocks, next) {
3120 if (addr - block->offset < block->length) {
7d82af38
VP
3121 /* Move this entry to the start of the list. */
3122 if (block != QLIST_FIRST(&ram_list.blocks)) {
3123 QLIST_REMOVE(block, next);
3124 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3125 }
868bb33f 3126 if (xen_enabled()) {
432d268c
JN
3127 /* We need to check if the requested address is in the RAM
3128 * because we don't want to map the entire memory in QEMU.
712c2b41 3129 * In that case just map until the end of the page.
432d268c
JN
3130 */
3131 if (block->offset == 0) {
e41d7c69 3132 return xen_map_cache(addr, 0, 0);
432d268c 3133 } else if (block->host == NULL) {
e41d7c69
JK
3134 block->host =
3135 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3136 }
3137 }
f471a17e
AW
3138 return block->host + (addr - block->offset);
3139 }
94a6b54f 3140 }
f471a17e
AW
3141
3142 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3143 abort();
3144
3145 return NULL;
dc828ca1
PB
3146}
3147
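/* Editor's note (not in the original source): the move-to-front above is a
 * micro-optimisation for the common case of the main RAM block dominating
 * lookups.  Callers that need the block order to stay stable (for instance
 * while migration is walking ram_list) should use qemu_safe_ram_ptr()
 * below, which does the same lookup without reordering the list. */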
b2e0a138
MT
3148/* Return a host pointer to ram allocated with qemu_ram_alloc.
3149 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3150 */
3151void *qemu_safe_ram_ptr(ram_addr_t addr)
3152{
3153 RAMBlock *block;
3154
3155 QLIST_FOREACH(block, &ram_list.blocks, next) {
3156 if (addr - block->offset < block->length) {
868bb33f 3157 if (xen_enabled()) {
432d268c
JN
3158 /* We need to check if the requested address is in the RAM
3159 * because we don't want to map the entire memory in QEMU.
712c2b41 3160 * In that case just map until the end of the page.
432d268c
JN
3161 */
3162 if (block->offset == 0) {
e41d7c69 3163 return xen_map_cache(addr, 0, 0);
432d268c 3164 } else if (block->host == NULL) {
e41d7c69
JK
3165 block->host =
3166 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3167 }
3168 }
b2e0a138
MT
3169 return block->host + (addr - block->offset);
3170 }
3171 }
3172
3173 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3174 abort();
3175
3176 return NULL;
3177}
3178
38bee5dc
SS
3179/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3180 * but takes a size argument */
8ab934f9 3181void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3182{
8ab934f9
SS
3183 if (*size == 0) {
3184 return NULL;
3185 }
868bb33f 3186 if (xen_enabled()) {
e41d7c69 3187 return xen_map_cache(addr, *size, 1);
868bb33f 3188 } else {
38bee5dc
SS
3189 RAMBlock *block;
3190
3191 QLIST_FOREACH(block, &ram_list.blocks, next) {
3192 if (addr - block->offset < block->length) {
3193 if (addr - block->offset + *size > block->length)
3194 *size = block->length - addr + block->offset;
3195 return block->host + (addr - block->offset);
3196 }
3197 }
3198
3199 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3200 abort();
38bee5dc
SS
3201 }
3202}
3203
050a0ddf
AP
3204void qemu_put_ram_ptr(void *addr)
3205{
3206 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3207}
3208
e890261f 3209int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3210{
94a6b54f
PB
3211 RAMBlock *block;
3212 uint8_t *host = ptr;
3213
868bb33f 3214 if (xen_enabled()) {
e41d7c69 3215 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3216 return 0;
3217 }
3218
f471a17e 3219 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3220 /* This case happens when the block is not mapped. */
3221 if (block->host == NULL) {
3222 continue;
3223 }
f471a17e 3224 if (host - block->host < block->length) {
e890261f
MT
3225 *ram_addr = block->offset + (host - block->host);
3226 return 0;
f471a17e 3227 }
94a6b54f 3228 }
432d268c 3229
e890261f
MT
3230 return -1;
3231}
f471a17e 3232
e890261f
MT
3233/* Some of the softmmu routines need to translate from a host pointer
3234 (typically a TLB entry) back to a ram offset. */
3235ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3236{
3237 ram_addr_t ram_addr;
f471a17e 3238
e890261f
MT
3239 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3240 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3241 abort();
3242 }
3243 return ram_addr;
5579c7f3
PB
3244}
3245
c227f099 3246static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3247{
67d3b957 3248#ifdef DEBUG_UNASSIGNED
ab3d1727 3249 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3250#endif
5b450407 3251#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3252 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3253#endif
3254 return 0;
3255}
3256
c227f099 3257static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3258{
3259#ifdef DEBUG_UNASSIGNED
3260 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3261#endif
5b450407 3262#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3263 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3264#endif
3265 return 0;
3266}
3267
c227f099 3268static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3269{
3270#ifdef DEBUG_UNASSIGNED
3271 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3272#endif
5b450407 3273#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3274 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3275#endif
33417e70
FB
3276 return 0;
3277}
3278
c227f099 3279static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3280{
67d3b957 3281#ifdef DEBUG_UNASSIGNED
ab3d1727 3282 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3283#endif
5b450407 3284#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3285 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3286#endif
3287}
3288
c227f099 3289static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3290{
3291#ifdef DEBUG_UNASSIGNED
3292 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3293#endif
5b450407 3294#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3295 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3296#endif
3297}
3298
c227f099 3299static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3300{
3301#ifdef DEBUG_UNASSIGNED
3302 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3303#endif
5b450407 3304#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3305 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3306#endif
33417e70
FB
3307}
3308
d60efc6b 3309static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3310 unassigned_mem_readb,
e18231a3
BS
3311 unassigned_mem_readw,
3312 unassigned_mem_readl,
33417e70
FB
3313};
3314
d60efc6b 3315static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3316 unassigned_mem_writeb,
e18231a3
BS
3317 unassigned_mem_writew,
3318 unassigned_mem_writel,
33417e70
FB
3319};
3320
c227f099 3321static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3322 uint32_t val)
9fa3e853 3323{
3a7d929e 3324 int dirty_flags;
f7c11b53 3325 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3326 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3327#if !defined(CONFIG_USER_ONLY)
3a7d929e 3328 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3329 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3330#endif
3a7d929e 3331 }
5579c7f3 3332 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3333 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3334 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3335 /* we remove the notdirty callback only if the code has been
3336 flushed */
3337 if (dirty_flags == 0xff)
2e70f6ef 3338 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3339}
3340
c227f099 3341static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3342 uint32_t val)
9fa3e853 3343{
3a7d929e 3344 int dirty_flags;
f7c11b53 3345 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3346 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3347#if !defined(CONFIG_USER_ONLY)
3a7d929e 3348 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3349 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3350#endif
3a7d929e 3351 }
5579c7f3 3352 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3353 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3354 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3355 /* we remove the notdirty callback only if the code has been
3356 flushed */
3357 if (dirty_flags == 0xff)
2e70f6ef 3358 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3359}
3360
c227f099 3361static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3362 uint32_t val)
9fa3e853 3363{
3a7d929e 3364 int dirty_flags;
f7c11b53 3365 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3366 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3367#if !defined(CONFIG_USER_ONLY)
3a7d929e 3368 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3369 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3370#endif
3a7d929e 3371 }
5579c7f3 3372 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3373 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3374 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3375 /* we remove the notdirty callback only if the code has been
3376 flushed */
3377 if (dirty_flags == 0xff)
2e70f6ef 3378 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3379}
3380
d60efc6b 3381static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3382 NULL, /* never used */
3383 NULL, /* never used */
3384 NULL, /* never used */
3385};
3386
d60efc6b 3387static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3388 notdirty_mem_writeb,
3389 notdirty_mem_writew,
3390 notdirty_mem_writel,
3391};
3392
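/* Editor's note (not in the original source): these handlers implement the
 * TLB_NOTDIRTY slow path set up in tlb_set_page().  The first write to a
 * clean RAM page invalidates any translated code on it, performs the store,
 * sets the dirty flags, and once all flags are set (0xff) flips the TLB
 * entry back to a plain fast-path RAM write via tlb_set_dirty(). */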
0f459d16 3393/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3394static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3395{
3396 CPUState *env = cpu_single_env;
06d55cc1
AL
3397 target_ulong pc, cs_base;
3398 TranslationBlock *tb;
0f459d16 3399 target_ulong vaddr;
a1d1bb31 3400 CPUWatchpoint *wp;
06d55cc1 3401 int cpu_flags;
0f459d16 3402
06d55cc1
AL
3403 if (env->watchpoint_hit) {
3404 /* We re-entered the check after replacing the TB. Now raise
3405 * the debug interrupt so that it will trigger after the
3406 * current instruction. */
3407 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3408 return;
3409 }
2e70f6ef 3410 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3411 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3412 if ((vaddr == (wp->vaddr & len_mask) ||
3413 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3414 wp->flags |= BP_WATCHPOINT_HIT;
3415 if (!env->watchpoint_hit) {
3416 env->watchpoint_hit = wp;
3417 tb = tb_find_pc(env->mem_io_pc);
3418 if (!tb) {
3419 cpu_abort(env, "check_watchpoint: could not find TB for "
3420 "pc=%p", (void *)env->mem_io_pc);
3421 }
618ba8e6 3422 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3423 tb_phys_invalidate(tb, -1);
3424 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3425 env->exception_index = EXCP_DEBUG;
3426 } else {
3427 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3428 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3429 }
3430 cpu_resume_from_signal(env, NULL);
06d55cc1 3431 }
6e140f28
AL
3432 } else {
3433 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3434 }
3435 }
3436}
3437
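/* Editor's note (not in the original source): pages carrying watchpoints
 * are mapped with TLB_MMIO (see tlb_set_page), so every access goes through
 * the watch_mem_* routines below, which call check_watchpoint() first.  On
 * a hit the current TB is invalidated and execution is restarted so that a
 * debug exception can be delivered around the watched access with the CPU
 * state left consistent for the debugger. */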
6658ffb8
PB
3438/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3439 so these check for a hit then pass through to the normal out-of-line
3440 phys routines. */
c227f099 3441static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3442{
b4051334 3443 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3444 return ldub_phys(addr);
3445}
3446
c227f099 3447static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3448{
b4051334 3449 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3450 return lduw_phys(addr);
3451}
3452
c227f099 3453static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3454{
b4051334 3455 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3456 return ldl_phys(addr);
3457}
3458
c227f099 3459static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3460 uint32_t val)
3461{
b4051334 3462 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3463 stb_phys(addr, val);
3464}
3465
c227f099 3466static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3467 uint32_t val)
3468{
b4051334 3469 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3470 stw_phys(addr, val);
3471}
3472
c227f099 3473static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3474 uint32_t val)
3475{
b4051334 3476 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3477 stl_phys(addr, val);
3478}
3479
d60efc6b 3480static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3481 watch_mem_readb,
3482 watch_mem_readw,
3483 watch_mem_readl,
3484};
3485
d60efc6b 3486static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3487 watch_mem_writeb,
3488 watch_mem_writew,
3489 watch_mem_writel,
3490};
6658ffb8 3491
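/* Illustrative usage sketch (not part of exec.c): a debug front end arms a
 * write watchpoint with cpu_watchpoint_insert(); guest stores to that address
 * are then routed through io_mem_watch and end up in check_watchpoint()
 * above.  watch_addr is a hypothetical parameter supplied by the caller. */
#if 0
static void arm_write_watchpoint(CPUState *env, target_ulong watch_addr)
{
    /* len must be a power of two and watch_addr aligned to it */
    cpu_watchpoint_insert(env, watch_addr, 4, BP_MEM_WRITE, NULL);
}
#endif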
f6405247
RH
3492static inline uint32_t subpage_readlen (subpage_t *mmio,
3493 target_phys_addr_t addr,
3494 unsigned int len)
db7b5426 3495{
f6405247 3496 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3497#if defined(DEBUG_SUBPAGE)
3498 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3499 mmio, len, addr, idx);
3500#endif
db7b5426 3501
f6405247
RH
3502 addr += mmio->region_offset[idx];
3503 idx = mmio->sub_io_index[idx];
3504 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3505}
3506
c227f099 3507static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3508 uint32_t value, unsigned int len)
db7b5426 3509{
f6405247 3510 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3511#if defined(DEBUG_SUBPAGE)
f6405247
RH
3512 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3513 __func__, mmio, len, addr, idx, value);
db7b5426 3514#endif
f6405247
RH
3515
3516 addr += mmio->region_offset[idx];
3517 idx = mmio->sub_io_index[idx];
3518 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3519}
3520
c227f099 3521static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3522{
db7b5426
BS
3523 return subpage_readlen(opaque, addr, 0);
3524}
3525
c227f099 3526static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3527 uint32_t value)
3528{
db7b5426
BS
3529 subpage_writelen(opaque, addr, value, 0);
3530}
3531
c227f099 3532static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3533{
db7b5426
BS
3534 return subpage_readlen(opaque, addr, 1);
3535}
3536
c227f099 3537static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3538 uint32_t value)
3539{
db7b5426
BS
3540 subpage_writelen(opaque, addr, value, 1);
3541}
3542
c227f099 3543static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3544{
db7b5426
BS
3545 return subpage_readlen(opaque, addr, 2);
3546}
3547
f6405247
RH
3548static void subpage_writel (void *opaque, target_phys_addr_t addr,
3549 uint32_t value)
db7b5426 3550{
db7b5426
BS
3551 subpage_writelen(opaque, addr, value, 2);
3552}
3553
d60efc6b 3554static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3555 &subpage_readb,
3556 &subpage_readw,
3557 &subpage_readl,
3558};
3559
d60efc6b 3560static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3561 &subpage_writeb,
3562 &subpage_writew,
3563 &subpage_writel,
3564};
3565
c227f099
AL
3566static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3567 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3568{
3569 int idx, eidx;
3570
3571 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3572 return -1;
3573 idx = SUBPAGE_IDX(start);
3574 eidx = SUBPAGE_IDX(end);
3575#if defined(DEBUG_SUBPAGE)
0bf9e31a 3576 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3577 mmio, start, end, idx, eidx, memory);
3578#endif
95c318f5
GN
3579 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3580 memory = IO_MEM_UNASSIGNED;
f6405247 3581 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3582 for (; idx <= eidx; idx++) {
f6405247
RH
3583 mmio->sub_io_index[idx] = memory;
3584 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3585 }
3586
3587 return 0;
3588}
3589
f6405247
RH
3590static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3591 ram_addr_t orig_memory,
3592 ram_addr_t region_offset)
db7b5426 3593{
c227f099 3594 subpage_t *mmio;
db7b5426
BS
3595 int subpage_memory;
3596
7267c094 3597 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3598
3599 mmio->base = base;
2507c12a
AG
3600 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3601 DEVICE_NATIVE_ENDIAN);
db7b5426 3602#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3603 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3604 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3605#endif
1eec614b 3606 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3607 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3608
3609 return mmio;
3610}
3611
88715657
AL
3612static int get_free_io_mem_idx(void)
3613{
3614 int i;
3615
3616 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3617 if (!io_mem_used[i]) {
3618 io_mem_used[i] = 1;
3619 return i;
3620 }
c6703b47 3621 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3622 return -1;
3623}
3624
dd310534
AG
3625/*
3626 * Usually, devices operate in little endian mode. There are devices out
3627 * there that operate in big endian too. Each device gets byte swapped
3628 * mmio if plugged onto a CPU that does the other endianness.
3629 *
3630 * CPU Device swap?
3631 *
3632 * little little no
3633 * little big yes
3634 * big little yes
3635 * big big no
3636 */
3637
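/* Illustrative sketch (not part of exec.c): the table above boils down to
 * "byte swap iff the device endianness differs from the target endianness";
 * DEVICE_NATIVE_ENDIAN follows the CPU and never swaps.  The helper below is
 * hypothetical and only restates that rule. */
#if 0
static int mmio_needs_byteswap(enum device_endian dev)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int target_big = 1;
#else
    const int target_big = 0;
#endif
    if (dev == DEVICE_NATIVE_ENDIAN) {
        return 0;
    }
    return (dev == DEVICE_BIG_ENDIAN) != target_big;
}
#endif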
3638typedef struct SwapEndianContainer {
3639 CPUReadMemoryFunc *read[3];
3640 CPUWriteMemoryFunc *write[3];
3641 void *opaque;
3642} SwapEndianContainer;
3643
3644static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3645{
3646 uint32_t val;
3647 SwapEndianContainer *c = opaque;
3648 val = c->read[0](c->opaque, addr);
3649 return val;
3650}
3651
3652static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3653{
3654 uint32_t val;
3655 SwapEndianContainer *c = opaque;
3656 val = bswap16(c->read[1](c->opaque, addr));
3657 return val;
3658}
3659
3660static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3661{
3662 uint32_t val;
3663 SwapEndianContainer *c = opaque;
3664 val = bswap32(c->read[2](c->opaque, addr));
3665 return val;
3666}
3667
3668static CPUReadMemoryFunc * const swapendian_readfn[3]={
3669 swapendian_mem_readb,
3670 swapendian_mem_readw,
3671 swapendian_mem_readl
3672};
3673
3674static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3675 uint32_t val)
3676{
3677 SwapEndianContainer *c = opaque;
3678 c->write[0](c->opaque, addr, val);
3679}
3680
3681static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3682 uint32_t val)
3683{
3684 SwapEndianContainer *c = opaque;
3685 c->write[1](c->opaque, addr, bswap16(val));
3686}
3687
3688static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3689 uint32_t val)
3690{
3691 SwapEndianContainer *c = opaque;
3692 c->write[2](c->opaque, addr, bswap32(val));
3693}
3694
3695static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3696 swapendian_mem_writeb,
3697 swapendian_mem_writew,
3698 swapendian_mem_writel
3699};
3700
3701static void swapendian_init(int io_index)
3702{
7267c094 3703 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
dd310534
AG
3704 int i;
3705
3706 /* Swap mmio for big endian targets */
3707 c->opaque = io_mem_opaque[io_index];
3708 for (i = 0; i < 3; i++) {
3709 c->read[i] = io_mem_read[io_index][i];
3710 c->write[i] = io_mem_write[io_index][i];
3711
3712 io_mem_read[io_index][i] = swapendian_readfn[i];
3713 io_mem_write[io_index][i] = swapendian_writefn[i];
3714 }
3715 io_mem_opaque[io_index] = c;
3716}
3717
3718static void swapendian_del(int io_index)
3719{
3720 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
7267c094 3721 g_free(io_mem_opaque[io_index]);
dd310534
AG
3722 }
3723}
3724
33417e70
FB
3725/* mem_read and mem_write are arrays of function pointers used to access
3726 a byte (index 0), word (index 1) and dword (index 2).
0b4e6e3e 3727 Functions can be omitted with a NULL function pointer.
3ee89922 3728 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3729 modified. If it is zero, a new io zone is allocated. The return
3730 value can be used with cpu_register_physical_memory(). (-1) is
3731 returned on error. */
1eed09cb 3732static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3733 CPUReadMemoryFunc * const *mem_read,
3734 CPUWriteMemoryFunc * const *mem_write,
dd310534 3735 void *opaque, enum device_endian endian)
33417e70 3736{
3cab721d
RH
3737 int i;
3738
33417e70 3739 if (io_index <= 0) {
88715657
AL
3740 io_index = get_free_io_mem_idx();
3741 if (io_index == -1)
3742 return io_index;
33417e70 3743 } else {
1eed09cb 3744 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3745 if (io_index >= IO_MEM_NB_ENTRIES)
3746 return -1;
3747 }
b5ff1b31 3748
3cab721d
RH
3749 for (i = 0; i < 3; ++i) {
3750 io_mem_read[io_index][i]
3751 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3752 }
3753 for (i = 0; i < 3; ++i) {
3754 io_mem_write[io_index][i]
3755 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3756 }
a4193c8a 3757 io_mem_opaque[io_index] = opaque;
f6405247 3758
dd310534
AG
3759 switch (endian) {
3760 case DEVICE_BIG_ENDIAN:
3761#ifndef TARGET_WORDS_BIGENDIAN
3762 swapendian_init(io_index);
3763#endif
3764 break;
3765 case DEVICE_LITTLE_ENDIAN:
3766#ifdef TARGET_WORDS_BIGENDIAN
3767 swapendian_init(io_index);
3768#endif
3769 break;
3770 case DEVICE_NATIVE_ENDIAN:
3771 default:
3772 break;
3773 }
3774
f6405247 3775 return (io_index << IO_MEM_SHIFT);
33417e70 3776}
61382a50 3777
d60efc6b
BS
3778int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3779 CPUWriteMemoryFunc * const *mem_write,
dd310534 3780 void *opaque, enum device_endian endian)
1eed09cb 3781{
2507c12a 3782 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3783}
3784
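/* Illustrative usage sketch (not part of exec.c): a device registers its
 * byte/word/long handlers and maps the returned token with
 * cpu_register_physical_memory().  my_readb()/my_writeb()/... are assumed to
 * be defined elsewhere with the CPUReadMemoryFunc/CPUWriteMemoryFunc
 * signatures, and MY_MMIO_BASE is a hypothetical guest physical address. */
#if 0
static CPUReadMemoryFunc * const my_mmio_read[3] = {
    my_readb, my_readw, my_readl,
};
static CPUWriteMemoryFunc * const my_mmio_write[3] = {
    my_writeb, my_writew, my_writel,
};

static void my_device_map_mmio(void)
{
    int io = cpu_register_io_memory(my_mmio_read, my_mmio_write, NULL,
                                    DEVICE_LITTLE_ENDIAN);
    if (io == -1) {
        return; /* all IO_MEM_NB_ENTRIES slots are in use */
    }
    cpu_register_physical_memory(MY_MMIO_BASE, TARGET_PAGE_SIZE, io);
}
#endif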
88715657
AL
3785void cpu_unregister_io_memory(int io_table_address)
3786{
3787 int i;
3788 int io_index = io_table_address >> IO_MEM_SHIFT;
3789
dd310534
AG
3790 swapendian_del(io_index);
3791
88715657
AL
3792 for (i=0;i < 3; i++) {
3793 io_mem_read[io_index][i] = unassigned_mem_read[i];
3794 io_mem_write[io_index][i] = unassigned_mem_write[i];
3795 }
3796 io_mem_opaque[io_index] = NULL;
3797 io_mem_used[io_index] = 0;
3798}
3799
e9179ce1
AK
3800static void io_mem_init(void)
3801{
3802 int i;
3803
2507c12a
AG
3804 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3805 unassigned_mem_write, NULL,
3806 DEVICE_NATIVE_ENDIAN);
3807 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3808 unassigned_mem_write, NULL,
3809 DEVICE_NATIVE_ENDIAN);
3810 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3811 notdirty_mem_write, NULL,
3812 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3813 for (i=0; i<5; i++)
3814 io_mem_used[i] = 1;
3815
3816 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3817 watch_mem_write, NULL,
3818 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3819}
3820
62152b8a
AK
3821static void memory_map_init(void)
3822{
7267c094 3823 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3824 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3825 set_system_memory_map(system_memory);
309cb471 3826
7267c094 3827 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3828 memory_region_init(system_io, "io", 65536);
3829 set_system_io_map(system_io);
62152b8a
AK
3830}
3831
3832MemoryRegion *get_system_memory(void)
3833{
3834 return system_memory;
3835}
3836
309cb471
AK
3837MemoryRegion *get_system_io(void)
3838{
3839 return system_io;
3840}
3841
e2eef170
PB
3842#endif /* !defined(CONFIG_USER_ONLY) */
3843
13eb76e0
FB
3844/* physical memory access (slow version, mainly for debug) */
3845#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3846int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3847 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3848{
3849 int l, flags;
3850 target_ulong page;
53a5960a 3851 void * p;
13eb76e0
FB
3852
3853 while (len > 0) {
3854 page = addr & TARGET_PAGE_MASK;
3855 l = (page + TARGET_PAGE_SIZE) - addr;
3856 if (l > len)
3857 l = len;
3858 flags = page_get_flags(page);
3859 if (!(flags & PAGE_VALID))
a68fe89c 3860 return -1;
13eb76e0
FB
3861 if (is_write) {
3862 if (!(flags & PAGE_WRITE))
a68fe89c 3863 return -1;
579a97f7 3864 /* XXX: this code should not depend on lock_user */
72fb7daa 3865 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3866 return -1;
72fb7daa
AJ
3867 memcpy(p, buf, l);
3868 unlock_user(p, addr, l);
13eb76e0
FB
3869 } else {
3870 if (!(flags & PAGE_READ))
a68fe89c 3871 return -1;
579a97f7 3872 /* XXX: this code should not depend on lock_user */
72fb7daa 3873 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3874 return -1;
72fb7daa 3875 memcpy(buf, p, l);
5b257578 3876 unlock_user(p, addr, 0);
13eb76e0
FB
3877 }
3878 len -= l;
3879 buf += l;
3880 addr += l;
3881 }
a68fe89c 3882 return 0;
13eb76e0 3883}
8df1cd07 3884
13eb76e0 3885#else
c227f099 3886void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3887 int len, int is_write)
3888{
3889 int l, io_index;
3890 uint8_t *ptr;
3891 uint32_t val;
c227f099 3892 target_phys_addr_t page;
8ca5692d 3893 ram_addr_t pd;
92e873b9 3894 PhysPageDesc *p;
3b46e624 3895
13eb76e0
FB
3896 while (len > 0) {
3897 page = addr & TARGET_PAGE_MASK;
3898 l = (page + TARGET_PAGE_SIZE) - addr;
3899 if (l > len)
3900 l = len;
92e873b9 3901 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3902 if (!p) {
3903 pd = IO_MEM_UNASSIGNED;
3904 } else {
3905 pd = p->phys_offset;
3906 }
3b46e624 3907
13eb76e0 3908 if (is_write) {
3a7d929e 3909 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3910 target_phys_addr_t addr1 = addr;
13eb76e0 3911 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3912 if (p)
6c2934db 3913 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3914 /* XXX: could force cpu_single_env to NULL to avoid
3915 potential bugs */
6c2934db 3916 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3917 /* 32 bit write access */
c27004ec 3918 val = ldl_p(buf);
6c2934db 3919 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3920 l = 4;
6c2934db 3921 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3922 /* 16 bit write access */
c27004ec 3923 val = lduw_p(buf);
6c2934db 3924 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3925 l = 2;
3926 } else {
1c213d19 3927 /* 8 bit write access */
c27004ec 3928 val = ldub_p(buf);
6c2934db 3929 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3930 l = 1;
3931 }
3932 } else {
8ca5692d 3933 ram_addr_t addr1;
b448f2f3 3934 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3935 /* RAM case */
5579c7f3 3936 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3937 memcpy(ptr, buf, l);
3a7d929e
FB
3938 if (!cpu_physical_memory_is_dirty(addr1)) {
3939 /* invalidate code */
3940 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3941 /* set dirty bit */
f7c11b53
YT
3942 cpu_physical_memory_set_dirty_flags(
3943 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3944 }
050a0ddf 3945 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3946 }
3947 } else {
5fafdf24 3948 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3949 !(pd & IO_MEM_ROMD)) {
c227f099 3950 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3951 /* I/O case */
3952 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3953 if (p)
6c2934db
AJ
3954 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3955 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3956 /* 32 bit read access */
6c2934db 3957 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3958 stl_p(buf, val);
13eb76e0 3959 l = 4;
6c2934db 3960 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3961 /* 16 bit read access */
6c2934db 3962 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3963 stw_p(buf, val);
13eb76e0
FB
3964 l = 2;
3965 } else {
1c213d19 3966 /* 8 bit read access */
6c2934db 3967 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3968 stb_p(buf, val);
13eb76e0
FB
3969 l = 1;
3970 }
3971 } else {
3972 /* RAM case */
050a0ddf
AP
3973 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3974 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3975 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3976 }
3977 }
3978 len -= l;
3979 buf += l;
3980 addr += l;
3981 }
3982}
8df1cd07 3983
d0ecd2aa 3984/* used for ROM loading : can write in RAM and ROM */
c227f099 3985void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3986 const uint8_t *buf, int len)
3987{
3988 int l;
3989 uint8_t *ptr;
c227f099 3990 target_phys_addr_t page;
d0ecd2aa
FB
3991 unsigned long pd;
3992 PhysPageDesc *p;
3b46e624 3993
d0ecd2aa
FB
3994 while (len > 0) {
3995 page = addr & TARGET_PAGE_MASK;
3996 l = (page + TARGET_PAGE_SIZE) - addr;
3997 if (l > len)
3998 l = len;
3999 p = phys_page_find(page >> TARGET_PAGE_BITS);
4000 if (!p) {
4001 pd = IO_MEM_UNASSIGNED;
4002 } else {
4003 pd = p->phys_offset;
4004 }
3b46e624 4005
d0ecd2aa 4006 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
4007 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4008 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
4009 /* do nothing */
4010 } else {
4011 unsigned long addr1;
4012 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4013 /* ROM/RAM case */
5579c7f3 4014 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 4015 memcpy(ptr, buf, l);
050a0ddf 4016 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
4017 }
4018 len -= l;
4019 buf += l;
4020 addr += l;
4021 }
4022}
4023
6d16c2f8
AL
4024typedef struct {
4025 void *buffer;
c227f099
AL
4026 target_phys_addr_t addr;
4027 target_phys_addr_t len;
6d16c2f8
AL
4028} BounceBuffer;
4029
4030static BounceBuffer bounce;
4031
ba223c29
AL
4032typedef struct MapClient {
4033 void *opaque;
4034 void (*callback)(void *opaque);
72cf2d4f 4035 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4036} MapClient;
4037
72cf2d4f
BS
4038static QLIST_HEAD(map_client_list, MapClient) map_client_list
4039 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4040
4041void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4042{
7267c094 4043 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
4044
4045 client->opaque = opaque;
4046 client->callback = callback;
72cf2d4f 4047 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4048 return client;
4049}
4050
4051void cpu_unregister_map_client(void *_client)
4052{
4053 MapClient *client = (MapClient *)_client;
4054
72cf2d4f 4055 QLIST_REMOVE(client, link);
7267c094 4056 g_free(client);
ba223c29
AL
4057}
4058
4059static void cpu_notify_map_clients(void)
4060{
4061 MapClient *client;
4062
72cf2d4f
BS
4063 while (!QLIST_EMPTY(&map_client_list)) {
4064 client = QLIST_FIRST(&map_client_list);
ba223c29 4065 client->callback(client->opaque);
34d5e948 4066 cpu_unregister_map_client(client);
ba223c29
AL
4067 }
4068}
4069
6d16c2f8
AL
4070/* Map a physical memory region into a host virtual address.
4071 * May map a subset of the requested range, given by and returned in *plen.
4072 * May return NULL if resources needed to perform the mapping are exhausted.
4073 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4074 * Use cpu_register_map_client() to know when retrying the map operation is
4075 * likely to succeed.
6d16c2f8 4076 */
c227f099
AL
4077void *cpu_physical_memory_map(target_phys_addr_t addr,
4078 target_phys_addr_t *plen,
6d16c2f8
AL
4079 int is_write)
4080{
c227f099 4081 target_phys_addr_t len = *plen;
38bee5dc 4082 target_phys_addr_t todo = 0;
6d16c2f8 4083 int l;
c227f099 4084 target_phys_addr_t page;
6d16c2f8
AL
4085 unsigned long pd;
4086 PhysPageDesc *p;
f15fbc4b 4087 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4088 ram_addr_t rlen;
4089 void *ret;
6d16c2f8
AL
4090
4091 while (len > 0) {
4092 page = addr & TARGET_PAGE_MASK;
4093 l = (page + TARGET_PAGE_SIZE) - addr;
4094 if (l > len)
4095 l = len;
4096 p = phys_page_find(page >> TARGET_PAGE_BITS);
4097 if (!p) {
4098 pd = IO_MEM_UNASSIGNED;
4099 } else {
4100 pd = p->phys_offset;
4101 }
4102
4103 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4104 if (todo || bounce.buffer) {
6d16c2f8
AL
4105 break;
4106 }
4107 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4108 bounce.addr = addr;
4109 bounce.len = l;
4110 if (!is_write) {
54f7b4a3 4111 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4112 }
38bee5dc
SS
4113
4114 *plen = l;
4115 return bounce.buffer;
6d16c2f8 4116 }
8ab934f9
SS
4117 if (!todo) {
4118 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4119 }
6d16c2f8
AL
4120
4121 len -= l;
4122 addr += l;
38bee5dc 4123 todo += l;
6d16c2f8 4124 }
8ab934f9
SS
4125 rlen = todo;
4126 ret = qemu_ram_ptr_length(raddr, &rlen);
4127 *plen = rlen;
4128 return ret;
6d16c2f8
AL
4129}
4130
4131/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4132 * Will also mark the memory as dirty if is_write == 1. access_len gives
4133 * the amount of memory that was actually read or written by the caller.
4134 */
c227f099
AL
4135void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4136 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4137{
4138 if (buffer != bounce.buffer) {
4139 if (is_write) {
e890261f 4140 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4141 while (access_len) {
4142 unsigned l;
4143 l = TARGET_PAGE_SIZE;
4144 if (l > access_len)
4145 l = access_len;
4146 if (!cpu_physical_memory_is_dirty(addr1)) {
4147 /* invalidate code */
4148 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4149 /* set dirty bit */
f7c11b53
YT
4150 cpu_physical_memory_set_dirty_flags(
4151 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4152 }
4153 addr1 += l;
4154 access_len -= l;
4155 }
4156 }
868bb33f 4157 if (xen_enabled()) {
e41d7c69 4158 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4159 }
6d16c2f8
AL
4160 return;
4161 }
4162 if (is_write) {
4163 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4164 }
f8a83245 4165 qemu_vfree(bounce.buffer);
6d16c2f8 4166 bounce.buffer = NULL;
ba223c29 4167 cpu_notify_map_clients();
6d16c2f8 4168}
d0ecd2aa 4169
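/* Illustrative usage sketch (not part of exec.c): the zero-copy DMA pattern
 * the two functions above are meant for.  dma_addr, dma_len, the device fill
 * step and dma_retry_cb() (a void (*)(void *) callback) are hypothetical. */
#if 0
static void my_dma_to_guest(void *dev, target_phys_addr_t dma_addr,
                            target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Bounce buffer in use: ask to be called back when it is free. */
        cpu_register_map_client(dev, dma_retry_cb);
        return;
    }
    /* ... fill up to plen bytes at host with device data ... */
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif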
8df1cd07 4170/* warning: addr must be aligned */
1e78bcc1
AG
4171static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4172 enum device_endian endian)
8df1cd07
FB
4173{
4174 int io_index;
4175 uint8_t *ptr;
4176 uint32_t val;
4177 unsigned long pd;
4178 PhysPageDesc *p;
4179
4180 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4181 if (!p) {
4182 pd = IO_MEM_UNASSIGNED;
4183 } else {
4184 pd = p->phys_offset;
4185 }
3b46e624 4186
5fafdf24 4187 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4188 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4189 /* I/O case */
4190 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4191 if (p)
4192 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4193 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4194#if defined(TARGET_WORDS_BIGENDIAN)
4195 if (endian == DEVICE_LITTLE_ENDIAN) {
4196 val = bswap32(val);
4197 }
4198#else
4199 if (endian == DEVICE_BIG_ENDIAN) {
4200 val = bswap32(val);
4201 }
4202#endif
8df1cd07
FB
4203 } else {
4204 /* RAM case */
5579c7f3 4205 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4206 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4207 switch (endian) {
4208 case DEVICE_LITTLE_ENDIAN:
4209 val = ldl_le_p(ptr);
4210 break;
4211 case DEVICE_BIG_ENDIAN:
4212 val = ldl_be_p(ptr);
4213 break;
4214 default:
4215 val = ldl_p(ptr);
4216 break;
4217 }
8df1cd07
FB
4218 }
4219 return val;
4220}
4221
1e78bcc1
AG
4222uint32_t ldl_phys(target_phys_addr_t addr)
4223{
4224 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4225}
4226
4227uint32_t ldl_le_phys(target_phys_addr_t addr)
4228{
4229 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4230}
4231
4232uint32_t ldl_be_phys(target_phys_addr_t addr)
4233{
4234 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4235}
4236
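/* Illustrative sketch (not part of exec.c): the _le/_be variants let device
 * models load guest data structures with a fixed byte order regardless of
 * the target's endianness.  desc_addr is hypothetical. */
#if 0
static uint32_t read_le32_descriptor_word(target_phys_addr_t desc_addr)
{
    return ldl_le_phys(desc_addr); /* always little-endian in guest memory */
}
#endif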
84b7b8e7 4237/* warning: addr must be aligned */
1e78bcc1
AG
4238static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4239 enum device_endian endian)
84b7b8e7
FB
4240{
4241 int io_index;
4242 uint8_t *ptr;
4243 uint64_t val;
4244 unsigned long pd;
4245 PhysPageDesc *p;
4246
4247 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4248 if (!p) {
4249 pd = IO_MEM_UNASSIGNED;
4250 } else {
4251 pd = p->phys_offset;
4252 }
3b46e624 4253
2a4188a3
FB
4254 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4255 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4256 /* I/O case */
4257 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4258 if (p)
4259 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4260
4261 /* XXX This is broken when device endian != cpu endian.
4262 Fix and add "endian" variable check */
84b7b8e7
FB
4263#ifdef TARGET_WORDS_BIGENDIAN
4264 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4265 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4266#else
4267 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4268 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4269#endif
4270 } else {
4271 /* RAM case */
5579c7f3 4272 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4273 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4274 switch (endian) {
4275 case DEVICE_LITTLE_ENDIAN:
4276 val = ldq_le_p(ptr);
4277 break;
4278 case DEVICE_BIG_ENDIAN:
4279 val = ldq_be_p(ptr);
4280 break;
4281 default:
4282 val = ldq_p(ptr);
4283 break;
4284 }
84b7b8e7
FB
4285 }
4286 return val;
4287}
4288
1e78bcc1
AG
4289uint64_t ldq_phys(target_phys_addr_t addr)
4290{
4291 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4292}
4293
4294uint64_t ldq_le_phys(target_phys_addr_t addr)
4295{
4296 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4297}
4298
4299uint64_t ldq_be_phys(target_phys_addr_t addr)
4300{
4301 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4302}
4303
aab33094 4304/* XXX: optimize */
c227f099 4305uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4306{
4307 uint8_t val;
4308 cpu_physical_memory_read(addr, &val, 1);
4309 return val;
4310}
4311
733f0b02 4312/* warning: addr must be aligned */
1e78bcc1
AG
4313static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4314 enum device_endian endian)
aab33094 4315{
733f0b02
MT
4316 int io_index;
4317 uint8_t *ptr;
4318 uint64_t val;
4319 unsigned long pd;
4320 PhysPageDesc *p;
4321
4322 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4323 if (!p) {
4324 pd = IO_MEM_UNASSIGNED;
4325 } else {
4326 pd = p->phys_offset;
4327 }
4328
4329 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4330 !(pd & IO_MEM_ROMD)) {
4331 /* I/O case */
4332 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4333 if (p)
4334 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4335 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4336#if defined(TARGET_WORDS_BIGENDIAN)
4337 if (endian == DEVICE_LITTLE_ENDIAN) {
4338 val = bswap16(val);
4339 }
4340#else
4341 if (endian == DEVICE_BIG_ENDIAN) {
4342 val = bswap16(val);
4343 }
4344#endif
733f0b02
MT
4345 } else {
4346 /* RAM case */
4347 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4348 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4349 switch (endian) {
4350 case DEVICE_LITTLE_ENDIAN:
4351 val = lduw_le_p(ptr);
4352 break;
4353 case DEVICE_BIG_ENDIAN:
4354 val = lduw_be_p(ptr);
4355 break;
4356 default:
4357 val = lduw_p(ptr);
4358 break;
4359 }
733f0b02
MT
4360 }
4361 return val;
aab33094
FB
4362}
4363
1e78bcc1
AG
4364uint32_t lduw_phys(target_phys_addr_t addr)
4365{
4366 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4367}
4368
4369uint32_t lduw_le_phys(target_phys_addr_t addr)
4370{
4371 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4372}
4373
4374uint32_t lduw_be_phys(target_phys_addr_t addr)
4375{
4376 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4377}
4378
8df1cd07
FB
4379/* warning: addr must be aligned. The RAM page is not marked as dirty
4380 and the code inside is not invalidated. It is useful if the dirty
4381 bits are used to track modified PTEs */
c227f099 4382void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4383{
4384 int io_index;
4385 uint8_t *ptr;
4386 unsigned long pd;
4387 PhysPageDesc *p;
4388
4389 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4390 if (!p) {
4391 pd = IO_MEM_UNASSIGNED;
4392 } else {
4393 pd = p->phys_offset;
4394 }
3b46e624 4395
3a7d929e 4396 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4397 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4398 if (p)
4399 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4400 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4401 } else {
74576198 4402 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4403 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4404 stl_p(ptr, val);
74576198
AL
4405
4406 if (unlikely(in_migration)) {
4407 if (!cpu_physical_memory_is_dirty(addr1)) {
4408 /* invalidate code */
4409 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4410 /* set dirty bit */
f7c11b53
YT
4411 cpu_physical_memory_set_dirty_flags(
4412 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4413 }
4414 }
8df1cd07
FB
4415 }
4416}
4417
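/* Illustrative sketch (not part of exec.c): a target MMU helper updating a
 * guest PTE with stl_phys_notdirty() so that the write neither marks the
 * page dirty nor invalidates translated code.  pte_addr and the 0x20
 * "accessed" bit are hypothetical. */
#if 0
static void set_pte_accessed_bit(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);
}
#endif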
c227f099 4418void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4419{
4420 int io_index;
4421 uint8_t *ptr;
4422 unsigned long pd;
4423 PhysPageDesc *p;
4424
4425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4426 if (!p) {
4427 pd = IO_MEM_UNASSIGNED;
4428 } else {
4429 pd = p->phys_offset;
4430 }
3b46e624 4431
bc98a7ef
JM
4432 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4433 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4434 if (p)
4435 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4436#ifdef TARGET_WORDS_BIGENDIAN
4437 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4438 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4439#else
4440 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4441 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4442#endif
4443 } else {
5579c7f3 4444 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4445 (addr & ~TARGET_PAGE_MASK);
4446 stq_p(ptr, val);
4447 }
4448}
4449
8df1cd07 4450/* warning: addr must be aligned */
1e78bcc1
AG
4451static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4452 enum device_endian endian)
8df1cd07
FB
4453{
4454 int io_index;
4455 uint8_t *ptr;
4456 unsigned long pd;
4457 PhysPageDesc *p;
4458
4459 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4460 if (!p) {
4461 pd = IO_MEM_UNASSIGNED;
4462 } else {
4463 pd = p->phys_offset;
4464 }
3b46e624 4465
3a7d929e 4466 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4467 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4468 if (p)
4469 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4470#if defined(TARGET_WORDS_BIGENDIAN)
4471 if (endian == DEVICE_LITTLE_ENDIAN) {
4472 val = bswap32(val);
4473 }
4474#else
4475 if (endian == DEVICE_BIG_ENDIAN) {
4476 val = bswap32(val);
4477 }
4478#endif
8df1cd07
FB
4479 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4480 } else {
4481 unsigned long addr1;
4482 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4483 /* RAM case */
5579c7f3 4484 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4485 switch (endian) {
4486 case DEVICE_LITTLE_ENDIAN:
4487 stl_le_p(ptr, val);
4488 break;
4489 case DEVICE_BIG_ENDIAN:
4490 stl_be_p(ptr, val);
4491 break;
4492 default:
4493 stl_p(ptr, val);
4494 break;
4495 }
3a7d929e
FB
4496 if (!cpu_physical_memory_is_dirty(addr1)) {
4497 /* invalidate code */
4498 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4499 /* set dirty bit */
f7c11b53
YT
4500 cpu_physical_memory_set_dirty_flags(addr1,
4501 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4502 }
8df1cd07
FB
4503 }
4504}
4505
1e78bcc1
AG
4506void stl_phys(target_phys_addr_t addr, uint32_t val)
4507{
4508 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4509}
4510
4511void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4512{
4513 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4514}
4515
4516void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4517{
4518 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4519}
4520
aab33094 4521/* XXX: optimize */
c227f099 4522void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4523{
4524 uint8_t v = val;
4525 cpu_physical_memory_write(addr, &v, 1);
4526}
4527
733f0b02 4528/* warning: addr must be aligned */
1e78bcc1
AG
4529static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4530 enum device_endian endian)
aab33094 4531{
733f0b02
MT
4532 int io_index;
4533 uint8_t *ptr;
4534 unsigned long pd;
4535 PhysPageDesc *p;
4536
4537 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4538 if (!p) {
4539 pd = IO_MEM_UNASSIGNED;
4540 } else {
4541 pd = p->phys_offset;
4542 }
4543
4544 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4545 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4546 if (p)
4547 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4548#if defined(TARGET_WORDS_BIGENDIAN)
4549 if (endian == DEVICE_LITTLE_ENDIAN) {
4550 val = bswap16(val);
4551 }
4552#else
4553 if (endian == DEVICE_BIG_ENDIAN) {
4554 val = bswap16(val);
4555 }
4556#endif
733f0b02
MT
4557 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4558 } else {
4559 unsigned long addr1;
4560 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4561 /* RAM case */
4562 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4563 switch (endian) {
4564 case DEVICE_LITTLE_ENDIAN:
4565 stw_le_p(ptr, val);
4566 break;
4567 case DEVICE_BIG_ENDIAN:
4568 stw_be_p(ptr, val);
4569 break;
4570 default:
4571 stw_p(ptr, val);
4572 break;
4573 }
733f0b02
MT
4574 if (!cpu_physical_memory_is_dirty(addr1)) {
4575 /* invalidate code */
4576 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4577 /* set dirty bit */
4578 cpu_physical_memory_set_dirty_flags(addr1,
4579 (0xff & ~CODE_DIRTY_FLAG));
4580 }
4581 }
aab33094
FB
4582}
4583
1e78bcc1
AG
4584void stw_phys(target_phys_addr_t addr, uint32_t val)
4585{
4586 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4587}
4588
4589void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4590{
4591 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4592}
4593
4594void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4595{
4596 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4597}
4598
aab33094 4599/* XXX: optimize */
c227f099 4600void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4601{
4602 val = tswap64(val);
71d2b725 4603 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4604}
4605
1e78bcc1
AG
4606void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4607{
4608 val = cpu_to_le64(val);
4609 cpu_physical_memory_write(addr, &val, 8);
4610}
4611
4612void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4613{
4614 val = cpu_to_be64(val);
4615 cpu_physical_memory_write(addr, &val, 8);
4616}
4617
5e2972fd 4618/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4619int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4620 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4621{
4622 int l;
c227f099 4623 target_phys_addr_t phys_addr;
9b3c35e0 4624 target_ulong page;
13eb76e0
FB
4625
4626 while (len > 0) {
4627 page = addr & TARGET_PAGE_MASK;
4628 phys_addr = cpu_get_phys_page_debug(env, page);
4629 /* if no physical page mapped, return an error */
4630 if (phys_addr == -1)
4631 return -1;
4632 l = (page + TARGET_PAGE_SIZE) - addr;
4633 if (l > len)
4634 l = len;
5e2972fd 4635 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4636 if (is_write)
4637 cpu_physical_memory_write_rom(phys_addr, buf, l);
4638 else
5e2972fd 4639 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4640 len -= l;
4641 buf += l;
4642 addr += l;
4643 }
4644 return 0;
4645}
a68fe89c 4646#endif
13eb76e0 4647
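/* Illustrative usage sketch (not part of exec.c): how a debugger stub might
 * read a 32-bit value from guest virtual memory.  env and vaddr are assumed
 * to be supplied by the caller. */
#if 0
static uint32_t debug_read_u32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0; /* no physical page mapped at vaddr */
    }
    return ldl_p(buf);
}
#endif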
2e70f6ef
PB
4648/* in deterministic execution mode, instructions doing device I/Os
4649 must be at the end of the TB */
4650void cpu_io_recompile(CPUState *env, void *retaddr)
4651{
4652 TranslationBlock *tb;
4653 uint32_t n, cflags;
4654 target_ulong pc, cs_base;
4655 uint64_t flags;
4656
4657 tb = tb_find_pc((unsigned long)retaddr);
4658 if (!tb) {
4659 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4660 retaddr);
4661 }
4662 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4663 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4664 /* Calculate how many instructions had been executed before the fault
bf20dc07 4665 occurred. */
2e70f6ef
PB
4666 n = n - env->icount_decr.u16.low;
4667 /* Generate a new TB ending on the I/O insn. */
4668 n++;
4669 /* On MIPS and SH, delay slot instructions can only be restarted if
4670 they were already the first instruction in the TB. If this is not
bf20dc07 4671 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4672 branch. */
4673#if defined(TARGET_MIPS)
4674 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4675 env->active_tc.PC -= 4;
4676 env->icount_decr.u16.low++;
4677 env->hflags &= ~MIPS_HFLAG_BMASK;
4678 }
4679#elif defined(TARGET_SH4)
4680 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4681 && n > 1) {
4682 env->pc -= 2;
4683 env->icount_decr.u16.low++;
4684 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4685 }
4686#endif
4687 /* This should never happen. */
4688 if (n > CF_COUNT_MASK)
4689 cpu_abort(env, "TB too big during recompile");
4690
4691 cflags = n | CF_LAST_IO;
4692 pc = tb->pc;
4693 cs_base = tb->cs_base;
4694 flags = tb->flags;
4695 tb_phys_invalidate(tb, -1);
4696 /* FIXME: In theory this could raise an exception. In practice
4697 we have already translated the block once so it's probably ok. */
4698 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4699 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4700 the first in the TB) then we end up generating a whole new TB and
4701 repeating the fault, which is horribly inefficient.
4702 Better would be to execute just this insn uncached, or generate a
4703 second new TB. */
4704 cpu_resume_from_signal(env, NULL);
4705}
4706
b3755a91
PB
4707#if !defined(CONFIG_USER_ONLY)
4708
055403b2 4709void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4710{
4711 int i, target_code_size, max_target_code_size;
4712 int direct_jmp_count, direct_jmp2_count, cross_page;
4713 TranslationBlock *tb;
3b46e624 4714
e3db7226
FB
4715 target_code_size = 0;
4716 max_target_code_size = 0;
4717 cross_page = 0;
4718 direct_jmp_count = 0;
4719 direct_jmp2_count = 0;
4720 for(i = 0; i < nb_tbs; i++) {
4721 tb = &tbs[i];
4722 target_code_size += tb->size;
4723 if (tb->size > max_target_code_size)
4724 max_target_code_size = tb->size;
4725 if (tb->page_addr[1] != -1)
4726 cross_page++;
4727 if (tb->tb_next_offset[0] != 0xffff) {
4728 direct_jmp_count++;
4729 if (tb->tb_next_offset[1] != 0xffff) {
4730 direct_jmp2_count++;
4731 }
4732 }
4733 }
4734 /* XXX: avoid using doubles ? */
57fec1fe 4735 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4736 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4737 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4738 cpu_fprintf(f, "TB count %d/%d\n",
4739 nb_tbs, code_gen_max_blocks);
5fafdf24 4740 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4741 nb_tbs ? target_code_size / nb_tbs : 0,
4742 max_target_code_size);
055403b2 4743 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4744 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4745 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4746 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4747 cross_page,
e3db7226
FB
4748 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4749 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4750 direct_jmp_count,
e3db7226
FB
4751 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4752 direct_jmp2_count,
4753 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4754 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4755 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4756 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4757 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4758 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4759}
4760
61382a50 4761#define MMUSUFFIX _cmmu
3917149d 4762#undef GETPC
61382a50
FB
4763#define GETPC() NULL
4764#define env cpu_single_env
b769d8fe 4765#define SOFTMMU_CODE_ACCESS
61382a50
FB
4766
4767#define SHIFT 0
4768#include "softmmu_template.h"
4769
4770#define SHIFT 1
4771#include "softmmu_template.h"
4772
4773#define SHIFT 2
4774#include "softmmu_template.h"
4775
4776#define SHIFT 3
4777#include "softmmu_template.h"
4778
4779#undef env
4780
4781#endif