[qemu.git] / exec.c
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
fd6ce8f6 60//#define DEBUG_TB_INVALIDATE
66e85a21 61//#define DEBUG_FLUSH
9fa3e853 62//#define DEBUG_TLB
67d3b957 63//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
64
65/* make various TB consistency checks */
5fafdf24
TS
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
fd6ce8f6 68
1196be37 69//#define DEBUG_IOPORT
db7b5426 70//#define DEBUG_SUBPAGE
1196be37 71
99773bd4
PB
72#if !defined(CONFIG_USER_ONLY)
73/* TB consistency checks only implemented for usermode emulation. */
74#undef DEBUG_TB_CHECK
75#endif
76
9fa3e853
FB
77#define SMC_BITMAP_USE_THRESHOLD 10
78
bdaf78e0 79static TranslationBlock *tbs;
24ab68ac 80static int code_gen_max_blocks;
9fa3e853 81TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 82static int nb_tbs;
eb51d102 83/* any access to the tbs or the page table must use this lock */
c227f099 84spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 85
141ac468
BS
86#if defined(__arm__) || defined(__sparc_v9__)
87/* The prologue must be reachable with a direct jump. ARM and Sparc64
88 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
89 section close to the code segment. */
90#define code_gen_section \
91 __attribute__((__section__(".gen_code"))) \
92 __attribute__((aligned (32)))
f8e2af11
SW
93#elif defined(_WIN32)
94/* Maximum alignment for Win32 is 16. */
95#define code_gen_section \
96 __attribute__((aligned (16)))
d03d860b
BS
97#else
98#define code_gen_section \
99 __attribute__((aligned (32)))
100#endif
101
102uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
103static uint8_t *code_gen_buffer;
104static unsigned long code_gen_buffer_size;
26a5f13b 105/* threshold to flush the translated code buffer */
bdaf78e0 106static unsigned long code_gen_buffer_max_size;
24ab68ac 107static uint8_t *code_gen_ptr;
fd6ce8f6 108
e2eef170 109#if !defined(CONFIG_USER_ONLY)
9fa3e853 110int phys_ram_fd;
74576198 111static int in_migration;
94a6b54f 112
85d59fef 113RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
114
115static MemoryRegion *system_memory;
309cb471 116static MemoryRegion *system_io;
62152b8a 117
e2eef170 118#endif
9fa3e853 119
6a00d601
FB
120CPUState *first_cpu;
121/* current CPU in the current thread. It is only valid inside
122 cpu_exec() */
b3c4bbe5 123DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 124/* 0 = Do not count executed instructions.
bf20dc07 125 1 = Precise instruction counting.
2e70f6ef
PB
126 2 = Adaptive rate instruction counting. */
127int use_icount = 0;
6a00d601 128
54936004 129typedef struct PageDesc {
92e873b9 130 /* list of TBs intersecting this ram page */
fd6ce8f6 131 TranslationBlock *first_tb;
9fa3e853
FB
132 /* in order to optimize self modifying code, we count the number
133 of code write accesses to a given page; above a threshold we use a bitmap */
134 unsigned int code_write_count;
135 uint8_t *code_bitmap;
136#if defined(CONFIG_USER_ONLY)
137 unsigned long flags;
138#endif
54936004
FB
139} PageDesc;
140
41c1b1c9 141/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
142 while in user mode we want it to be based on virtual addresses. */
143#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
144#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
145# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
146#else
5cd2c5b6 147# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 148#endif
bedb69ea 149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 151#endif
54936004 152
5cd2c5b6
RH
153/* Size of the L2 (and L3, etc) page tables. */
154#define L2_BITS 10
54936004
FB
155#define L2_SIZE (1 << L2_BITS)
156
5cd2c5b6
RH
157/* The bits remaining after N lower levels of page tables. */
158#define P_L1_BITS_REM \
159 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160#define V_L1_BITS_REM \
161 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162
163/* Size of the L1 page table. Avoid silly small sizes. */
164#if P_L1_BITS_REM < 4
165#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
166#else
167#define P_L1_BITS P_L1_BITS_REM
168#endif
169
170#if V_L1_BITS_REM < 4
171#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172#else
173#define V_L1_BITS V_L1_BITS_REM
174#endif
175
176#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
177#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
178
179#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
180#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
181
83fb7adf 182unsigned long qemu_real_host_page_size;
83fb7adf
FB
183unsigned long qemu_host_page_size;
184unsigned long qemu_host_page_mask;
54936004 185
5cd2c5b6
RH
186/* This is a multi-level map on the virtual address space.
187 The bottom level has pointers to PageDesc. */
188static void *l1_map[V_L1_SIZE];
54936004 189
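For illustration only (values assumed, not taken from any particular target), the standalone program below reproduces the arithmetic behind the V_L1_* macros above and shows how many table levels page_find_alloc() ends up walking for that assumed configuration.

#include <stdio.h>

/* Assumed configuration: 47-bit user-mode address space, 4 KiB target pages. */
#define EX_ADDR_SPACE_BITS 47   /* stands in for L1_MAP_ADDR_SPACE_BITS */
#define EX_PAGE_BITS       12   /* stands in for TARGET_PAGE_BITS */
#define EX_L2_BITS         10   /* same value as L2_BITS above */

int main(void)
{
    int rem   = (EX_ADDR_SPACE_BITS - EX_PAGE_BITS) % EX_L2_BITS;  /* 5 */
    int l1    = rem < 4 ? rem + EX_L2_BITS : rem;                  /* 5 */
    int shift = EX_ADDR_SPACE_BITS - EX_PAGE_BITS - l1;            /* 30 */

    /* Below the L1 table sit shift / EX_L2_BITS further levels; the last
       one holds PageDesc entries, the others hold pointers. */
    printf("L1 entries: %d, levels below L1: %d\n",
           1 << l1, shift / EX_L2_BITS);                           /* 32, 3 */
    return 0;
}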
e2eef170 190#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
191typedef struct PhysPageDesc {
192 /* offset in host memory of the page + io_index in the low bits */
193 ram_addr_t phys_offset;
194 ram_addr_t region_offset;
195} PhysPageDesc;
196
5cd2c5b6
RH
197/* This is a multi-level map on the physical address space.
198 The bottom level has pointers to PhysPageDesc. */
199static void *l1_phys_map[P_L1_SIZE];
6d9a1304 200
e2eef170 201static void io_mem_init(void);
62152b8a 202static void memory_map_init(void);
e2eef170 203
33417e70 204/* io memory support */
33417e70
FB
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 208static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
209static int io_mem_watch;
210#endif
33417e70 211
34865134 212/* log support */
1e8b27ca
JR
213#ifdef WIN32
214static const char *logfilename = "qemu.log";
215#else
d9b630fd 216static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 217#endif
34865134
FB
218FILE *logfile;
219int loglevel;
e735b91c 220static int log_append = 0;
34865134 221
e3db7226 222/* statistics */
b3755a91 223#if !defined(CONFIG_USER_ONLY)
e3db7226 224static int tlb_flush_count;
b3755a91 225#endif
e3db7226
FB
226static int tb_flush_count;
227static int tb_phys_invalidate_count;
228
7cb69cae
FB
229#ifdef _WIN32
230static void map_exec(void *addr, long size)
231{
232 DWORD old_protect;
233 VirtualProtect(addr, size,
234 PAGE_EXECUTE_READWRITE, &old_protect);
235
236}
237#else
238static void map_exec(void *addr, long size)
239{
4369415f 240 unsigned long start, end, page_size;
7cb69cae 241
4369415f 242 page_size = getpagesize();
7cb69cae 243 start = (unsigned long)addr;
4369415f 244 start &= ~(page_size - 1);
7cb69cae
FB
245
246 end = (unsigned long)addr + size;
4369415f
FB
247 end += page_size - 1;
248 end &= ~(page_size - 1);
7cb69cae
FB
249
250 mprotect((void *)start, end - start,
251 PROT_READ | PROT_WRITE | PROT_EXEC);
252}
253#endif
254
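A small worked example of the rounding done by map_exec() above; the addresses and the 4 KiB page size are made up for illustration.

#include <stdio.h>

int main(void)
{
    /* Hypothetical inputs; 4 KiB host pages assumed. */
    unsigned long addr = 0x402104, size = 0x40, page_size = 0x1000;
    unsigned long start = addr & ~(page_size - 1);                        /* 0x402000 */
    unsigned long end = (addr + size + page_size - 1) & ~(page_size - 1); /* 0x403000 */
    /* mprotect()/VirtualProtect() therefore cover exactly one whole page. */
    printf("[0x%lx, 0x%lx)\n", start, end);
    return 0;
}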
b346ff46 255static void page_init(void)
54936004 256{
83fb7adf 257 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 258 TARGET_PAGE_SIZE */
c2b48b69
AL
259#ifdef _WIN32
260 {
261 SYSTEM_INFO system_info;
262
263 GetSystemInfo(&system_info);
264 qemu_real_host_page_size = system_info.dwPageSize;
265 }
266#else
267 qemu_real_host_page_size = getpagesize();
268#endif
83fb7adf
FB
269 if (qemu_host_page_size == 0)
270 qemu_host_page_size = qemu_real_host_page_size;
271 if (qemu_host_page_size < TARGET_PAGE_SIZE)
272 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 274
2e9a5713 275#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 276 {
f01576f1
JL
277#ifdef HAVE_KINFO_GETVMMAP
278 struct kinfo_vmentry *freep;
279 int i, cnt;
280
281 freep = kinfo_getvmmap(getpid(), &cnt);
282 if (freep) {
283 mmap_lock();
284 for (i = 0; i < cnt; i++) {
285 unsigned long startaddr, endaddr;
286
287 startaddr = freep[i].kve_start;
288 endaddr = freep[i].kve_end;
289 if (h2g_valid(startaddr)) {
290 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291
292 if (h2g_valid(endaddr)) {
293 endaddr = h2g(endaddr);
fd436907 294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
295 } else {
296#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297 endaddr = ~0ul;
fd436907 298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
299#endif
300 }
301 }
302 }
303 free(freep);
304 mmap_unlock();
305 }
306#else
50a9569b 307 FILE *f;
50a9569b 308
0776590d 309 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 310
fd436907 311 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 312 if (f) {
5cd2c5b6
RH
313 mmap_lock();
314
50a9569b 315 do {
5cd2c5b6
RH
316 unsigned long startaddr, endaddr;
317 int n;
318
319 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
320
321 if (n == 2 && h2g_valid(startaddr)) {
322 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
323
324 if (h2g_valid(endaddr)) {
325 endaddr = h2g(endaddr);
326 } else {
327 endaddr = ~0ul;
328 }
329 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
330 }
331 } while (!feof(f));
5cd2c5b6 332
50a9569b 333 fclose(f);
5cd2c5b6 334 mmap_unlock();
50a9569b 335 }
f01576f1 336#endif
50a9569b
AZ
337 }
338#endif
54936004
FB
339}
340
41c1b1c9 341static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 342{
41c1b1c9
PB
343 PageDesc *pd;
344 void **lp;
345 int i;
346
5cd2c5b6 347#if defined(CONFIG_USER_ONLY)
7267c094 348 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
349# define ALLOC(P, SIZE) \
350 do { \
351 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
352 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
353 } while (0)
354#else
355# define ALLOC(P, SIZE) \
7267c094 356 do { P = g_malloc0(SIZE); } while (0)
17e2377a 357#endif
434929bf 358
5cd2c5b6
RH
359 /* Level 1. Always allocated. */
360 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
361
362 /* Level 2..N-1. */
363 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
364 void **p = *lp;
365
366 if (p == NULL) {
367 if (!alloc) {
368 return NULL;
369 }
370 ALLOC(p, sizeof(void *) * L2_SIZE);
371 *lp = p;
17e2377a 372 }
5cd2c5b6
RH
373
374 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
375 }
376
377 pd = *lp;
378 if (pd == NULL) {
379 if (!alloc) {
380 return NULL;
381 }
382 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
383 *lp = pd;
54936004 384 }
5cd2c5b6
RH
385
386#undef ALLOC
5cd2c5b6
RH
387
388 return pd + (index & (L2_SIZE - 1));
54936004
FB
389}
390
41c1b1c9 391static inline PageDesc *page_find(tb_page_addr_t index)
54936004 392{
5cd2c5b6 393 return page_find_alloc(index, 0);
fd6ce8f6
FB
394}
395
6d9a1304 396#if !defined(CONFIG_USER_ONLY)
c227f099 397static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 398{
e3f4e2a4 399 PhysPageDesc *pd;
5cd2c5b6
RH
400 void **lp;
401 int i;
92e873b9 402
5cd2c5b6
RH
403 /* Level 1. Always allocated. */
404 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 405
5cd2c5b6
RH
406 /* Level 2..N-1. */
407 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
408 void **p = *lp;
409 if (p == NULL) {
410 if (!alloc) {
411 return NULL;
412 }
7267c094 413 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
414 }
415 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 416 }
5cd2c5b6 417
e3f4e2a4 418 pd = *lp;
5cd2c5b6 419 if (pd == NULL) {
e3f4e2a4 420 int i;
5cd2c5b6
RH
421
422 if (!alloc) {
108c49b8 423 return NULL;
5cd2c5b6
RH
424 }
425
7267c094 426 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 427
67c4d23c 428 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6
RH
429 pd[i].phys_offset = IO_MEM_UNASSIGNED;
430 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
67c4d23c 431 }
92e873b9 432 }
5cd2c5b6
RH
433
434 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
435}
436
c227f099 437static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 438{
108c49b8 439 return phys_page_find_alloc(index, 0);
92e873b9
FB
440}
441
c227f099
AL
442static void tlb_protect_code(ram_addr_t ram_addr);
443static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 444 target_ulong vaddr);
c8a706fe
PB
445#define mmap_lock() do { } while(0)
446#define mmap_unlock() do { } while(0)
9fa3e853 447#endif
fd6ce8f6 448
4369415f
FB
449#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
450
451#if defined(CONFIG_USER_ONLY)
ccbb4d44 452/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
453 user mode. It will change when a dedicated libc is used */
454#define USE_STATIC_CODE_GEN_BUFFER
455#endif
456
457#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
458static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
460#endif
461
8fcd3692 462static void code_gen_alloc(unsigned long tb_size)
26a5f13b 463{
4369415f
FB
464#ifdef USE_STATIC_CODE_GEN_BUFFER
465 code_gen_buffer = static_code_gen_buffer;
466 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
467 map_exec(code_gen_buffer, code_gen_buffer_size);
468#else
26a5f13b
FB
469 code_gen_buffer_size = tb_size;
470 if (code_gen_buffer_size == 0) {
4369415f 471#if defined(CONFIG_USER_ONLY)
4369415f
FB
472 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
473#else
ccbb4d44 474 /* XXX: needs adjustments */
94a6b54f 475 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 476#endif
26a5f13b
FB
477 }
478 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
479 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
480 /* The code gen buffer location may have constraints depending on
481 the host cpu and OS */
482#if defined(__linux__)
483 {
484 int flags;
141ac468
BS
485 void *start = NULL;
486
26a5f13b
FB
487 flags = MAP_PRIVATE | MAP_ANONYMOUS;
488#if defined(__x86_64__)
489 flags |= MAP_32BIT;
490 /* Cannot map more than that */
491 if (code_gen_buffer_size > (800 * 1024 * 1024))
492 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
493#elif defined(__sparc_v9__)
494 // Map the buffer below 2G, so we can use direct calls and branches
495 flags |= MAP_FIXED;
496 start = (void *) 0x60000000UL;
497 if (code_gen_buffer_size > (512 * 1024 * 1024))
498 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 499#elif defined(__arm__)
222f23f5 500 /* Keep the buffer no bigger than 16MB to branch between blocks */
1cb0661e
AZ
501 if (code_gen_buffer_size > 16 * 1024 * 1024)
502 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
503#elif defined(__s390x__)
504 /* Map the buffer so that we can use direct calls and branches. */
505 /* We have a +- 4GB range on the branches; leave some slop. */
506 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
507 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
508 }
509 start = (void *)0x90000000UL;
26a5f13b 510#endif
141ac468
BS
511 code_gen_buffer = mmap(start, code_gen_buffer_size,
512 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
513 flags, -1, 0);
514 if (code_gen_buffer == MAP_FAILED) {
515 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
516 exit(1);
517 }
518 }
cbb608a5 519#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
520 || defined(__DragonFly__) || defined(__OpenBSD__) \
521 || defined(__NetBSD__)
06e67a82
AL
522 {
523 int flags;
524 void *addr = NULL;
525 flags = MAP_PRIVATE | MAP_ANONYMOUS;
526#if defined(__x86_64__)
527 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
528 * 0x40000000 is free */
529 flags |= MAP_FIXED;
530 addr = (void *)0x40000000;
531 /* Cannot map more than that */
532 if (code_gen_buffer_size > (800 * 1024 * 1024))
533 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
534#elif defined(__sparc_v9__)
535 // Map the buffer below 2G, so we can use direct calls and branches
536 flags |= MAP_FIXED;
537 addr = (void *) 0x60000000UL;
538 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
539 code_gen_buffer_size = (512 * 1024 * 1024);
540 }
06e67a82
AL
541#endif
542 code_gen_buffer = mmap(addr, code_gen_buffer_size,
543 PROT_WRITE | PROT_READ | PROT_EXEC,
544 flags, -1, 0);
545 if (code_gen_buffer == MAP_FAILED) {
546 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
547 exit(1);
548 }
549 }
26a5f13b 550#else
7267c094 551 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
552 map_exec(code_gen_buffer, code_gen_buffer_size);
553#endif
4369415f 554#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 555 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
556 code_gen_buffer_max_size = code_gen_buffer_size -
557 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 558 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 559 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
560}
561
562/* Must be called before using the QEMU cpus. 'tb_size' is the size
563 (in bytes) allocated to the translation buffer. Zero means default
564 size. */
d5ab9713 565void tcg_exec_init(unsigned long tb_size)
26a5f13b 566{
26a5f13b
FB
567 cpu_gen_init();
568 code_gen_alloc(tb_size);
569 code_gen_ptr = code_gen_buffer;
4369415f 570 page_init();
9002ec79
RH
571#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
572 /* There's no guest base to take into account, so go ahead and
573 initialize the prologue now. */
574 tcg_prologue_init(&tcg_ctx);
575#endif
26a5f13b
FB
576}
577
d5ab9713
JK
578bool tcg_enabled(void)
579{
580 return code_gen_buffer != NULL;
581}
582
583void cpu_exec_init_all(void)
584{
585#if !defined(CONFIG_USER_ONLY)
586 memory_map_init();
587 io_mem_init();
588#endif
589}
590
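A minimal sketch of the expected call order at startup (softmmu build assumed); it mirrors what the vl.c-style initialization code is expected to do and is not a drop-in replacement for it.

/* Sketch only: requires the QEMU headers; error handling omitted. */
static void example_init_translator(void)
{
    cpu_exec_init_all();   /* softmmu: create system_memory / system_io */
    tcg_exec_init(0);      /* 0 selects the default translation buffer size */
    if (!tcg_enabled()) {
        fprintf(stderr, "code_gen_buffer was not allocated\n");
    }
}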
9656f324
PB
591#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
592
e59fb374 593static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
594{
595 CPUState *env = opaque;
9656f324 596
3098dba0
AJ
597 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
598 version_id is increased. */
599 env->interrupt_request &= ~0x01;
9656f324
PB
600 tlb_flush(env, 1);
601
602 return 0;
603}
e7f4eff7
JQ
604
605static const VMStateDescription vmstate_cpu_common = {
606 .name = "cpu_common",
607 .version_id = 1,
608 .minimum_version_id = 1,
609 .minimum_version_id_old = 1,
e7f4eff7
JQ
610 .post_load = cpu_common_post_load,
611 .fields = (VMStateField []) {
612 VMSTATE_UINT32(halted, CPUState),
613 VMSTATE_UINT32(interrupt_request, CPUState),
614 VMSTATE_END_OF_LIST()
615 }
616};
9656f324
PB
617#endif
618
950f1472
GC
619CPUState *qemu_get_cpu(int cpu)
620{
621 CPUState *env = first_cpu;
622
623 while (env) {
624 if (env->cpu_index == cpu)
625 break;
626 env = env->next_cpu;
627 }
628
629 return env;
630}
631
6a00d601 632void cpu_exec_init(CPUState *env)
fd6ce8f6 633{
6a00d601
FB
634 CPUState **penv;
635 int cpu_index;
636
c2764719
PB
637#if defined(CONFIG_USER_ONLY)
638 cpu_list_lock();
639#endif
6a00d601
FB
640 env->next_cpu = NULL;
641 penv = &first_cpu;
642 cpu_index = 0;
643 while (*penv != NULL) {
1e9fa730 644 penv = &(*penv)->next_cpu;
6a00d601
FB
645 cpu_index++;
646 }
647 env->cpu_index = cpu_index;
268a362c 648 env->numa_node = 0;
72cf2d4f
BS
649 QTAILQ_INIT(&env->breakpoints);
650 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
651#ifndef CONFIG_USER_ONLY
652 env->thread_id = qemu_get_thread_id();
653#endif
6a00d601 654 *penv = env;
c2764719
PB
655#if defined(CONFIG_USER_ONLY)
656 cpu_list_unlock();
657#endif
b3c7724c 658#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
659 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
660 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
661 cpu_save, cpu_load, env);
662#endif
fd6ce8f6
FB
663}
664
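A hedged sketch of how a target's cpu_init-style code is expected to reach cpu_exec_init(); the allocation and the target-specific setup step are placeholders, not the real per-target code.

/* Sketch only: CPUState here is the per-target env structure; a real target's
   cpu_init() does considerably more work than this. */
static CPUState *example_cpu_create(void)
{
    CPUState *env = g_malloc0(sizeof(CPUState));
    /* target-specific register and feature setup would happen here */
    cpu_exec_init(env);   /* appends env to first_cpu and assigns cpu_index */
    assert(qemu_get_cpu(env->cpu_index) == env);
    return env;
}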
d1a1eb74
TG
665/* Allocate a new translation block. Flush the translation buffer if
666 too many translation blocks or too much generated code. */
667static TranslationBlock *tb_alloc(target_ulong pc)
668{
669 TranslationBlock *tb;
670
671 if (nb_tbs >= code_gen_max_blocks ||
672 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
673 return NULL;
674 tb = &tbs[nb_tbs++];
675 tb->pc = pc;
676 tb->cflags = 0;
677 return tb;
678}
679
680void tb_free(TranslationBlock *tb)
681{
682 /* In practice this is mostly used for single-use temporary TBs.
683 Ignore the hard cases and just back up if this TB happens to
684 be the last one generated. */
685 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
686 code_gen_ptr = tb->tc_ptr;
687 nb_tbs--;
688 }
689}
690
9fa3e853
FB
691static inline void invalidate_page_bitmap(PageDesc *p)
692{
693 if (p->code_bitmap) {
7267c094 694 g_free(p->code_bitmap);
9fa3e853
FB
695 p->code_bitmap = NULL;
696 }
697 p->code_write_count = 0;
698}
699
5cd2c5b6
RH
700/* Set to NULL all the 'first_tb' fields in all PageDescs. */
701
702static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 703{
5cd2c5b6 704 int i;
fd6ce8f6 705
5cd2c5b6
RH
706 if (*lp == NULL) {
707 return;
708 }
709 if (level == 0) {
710 PageDesc *pd = *lp;
7296abac 711 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
712 pd[i].first_tb = NULL;
713 invalidate_page_bitmap(pd + i);
fd6ce8f6 714 }
5cd2c5b6
RH
715 } else {
716 void **pp = *lp;
7296abac 717 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
718 page_flush_tb_1 (level - 1, pp + i);
719 }
720 }
721}
722
723static void page_flush_tb(void)
724{
725 int i;
726 for (i = 0; i < V_L1_SIZE; i++) {
727 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
728 }
729}
730
731/* flush all the translation blocks */
d4e8164f 732/* XXX: tb_flush is currently not thread safe */
6a00d601 733void tb_flush(CPUState *env1)
fd6ce8f6 734{
6a00d601 735 CPUState *env;
0124311e 736#if defined(DEBUG_FLUSH)
ab3d1727
BS
737 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
738 (unsigned long)(code_gen_ptr - code_gen_buffer),
739 nb_tbs, nb_tbs > 0 ?
740 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 741#endif
26a5f13b 742 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
743 cpu_abort(env1, "Internal error: code buffer overflow\n");
744
fd6ce8f6 745 nb_tbs = 0;
3b46e624 746
6a00d601
FB
747 for(env = first_cpu; env != NULL; env = env->next_cpu) {
748 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
749 }
9fa3e853 750
8a8a608f 751 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 752 page_flush_tb();
9fa3e853 753
fd6ce8f6 754 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
755 /* XXX: flush processor icache at this point if cache flush is
756 expensive */
e3db7226 757 tb_flush_count++;
fd6ce8f6
FB
758}
759
760#ifdef DEBUG_TB_CHECK
761
bc98a7ef 762static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
763{
764 TranslationBlock *tb;
765 int i;
766 address &= TARGET_PAGE_MASK;
99773bd4
PB
767 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
768 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
769 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
770 address >= tb->pc + tb->size)) {
0bf9e31a
BS
771 printf("ERROR invalidate: address=" TARGET_FMT_lx
772 " PC=%08lx size=%04x\n",
99773bd4 773 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
774 }
775 }
776 }
777}
778
779/* verify that all the pages have correct rights for code */
780static void tb_page_check(void)
781{
782 TranslationBlock *tb;
783 int i, flags1, flags2;
3b46e624 784
99773bd4
PB
785 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
786 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
787 flags1 = page_get_flags(tb->pc);
788 flags2 = page_get_flags(tb->pc + tb->size - 1);
789 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
790 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 791 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
792 }
793 }
794 }
795}
796
797#endif
798
799/* invalidate one TB */
800static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
801 int next_offset)
802{
803 TranslationBlock *tb1;
804 for(;;) {
805 tb1 = *ptb;
806 if (tb1 == tb) {
807 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
808 break;
809 }
810 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
811 }
812}
813
9fa3e853
FB
814static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
815{
816 TranslationBlock *tb1;
817 unsigned int n1;
818
819 for(;;) {
820 tb1 = *ptb;
821 n1 = (long)tb1 & 3;
822 tb1 = (TranslationBlock *)((long)tb1 & ~3);
823 if (tb1 == tb) {
824 *ptb = tb1->page_next[n1];
825 break;
826 }
827 ptb = &tb1->page_next[n1];
828 }
829}
830
d4e8164f
FB
831static inline void tb_jmp_remove(TranslationBlock *tb, int n)
832{
833 TranslationBlock *tb1, **ptb;
834 unsigned int n1;
835
836 ptb = &tb->jmp_next[n];
837 tb1 = *ptb;
838 if (tb1) {
839 /* find tb(n) in circular list */
840 for(;;) {
841 tb1 = *ptb;
842 n1 = (long)tb1 & 3;
843 tb1 = (TranslationBlock *)((long)tb1 & ~3);
844 if (n1 == n && tb1 == tb)
845 break;
846 if (n1 == 2) {
847 ptb = &tb1->jmp_first;
848 } else {
849 ptb = &tb1->jmp_next[n1];
850 }
851 }
852 /* now we can suppress tb(n) from the list */
853 *ptb = tb->jmp_next[n];
854
855 tb->jmp_next[n] = NULL;
856 }
857}
858
859/* reset the jump entry 'n' of a TB so that it is not chained to
860 another TB */
861static inline void tb_reset_jump(TranslationBlock *tb, int n)
862{
863 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
864}
865
41c1b1c9 866void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 867{
6a00d601 868 CPUState *env;
8a40a180 869 PageDesc *p;
d4e8164f 870 unsigned int h, n1;
41c1b1c9 871 tb_page_addr_t phys_pc;
8a40a180 872 TranslationBlock *tb1, *tb2;
3b46e624 873
8a40a180
FB
874 /* remove the TB from the hash list */
875 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
876 h = tb_phys_hash_func(phys_pc);
5fafdf24 877 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
878 offsetof(TranslationBlock, phys_hash_next));
879
880 /* remove the TB from the page list */
881 if (tb->page_addr[0] != page_addr) {
882 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
883 tb_page_remove(&p->first_tb, tb);
884 invalidate_page_bitmap(p);
885 }
886 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
887 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
888 tb_page_remove(&p->first_tb, tb);
889 invalidate_page_bitmap(p);
890 }
891
36bdbe54 892 tb_invalidated_flag = 1;
59817ccb 893
fd6ce8f6 894 /* remove the TB from the hash list */
8a40a180 895 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
896 for(env = first_cpu; env != NULL; env = env->next_cpu) {
897 if (env->tb_jmp_cache[h] == tb)
898 env->tb_jmp_cache[h] = NULL;
899 }
d4e8164f
FB
900
901 /* suppress this TB from the two jump lists */
902 tb_jmp_remove(tb, 0);
903 tb_jmp_remove(tb, 1);
904
905 /* suppress any remaining jumps to this TB */
906 tb1 = tb->jmp_first;
907 for(;;) {
908 n1 = (long)tb1 & 3;
909 if (n1 == 2)
910 break;
911 tb1 = (TranslationBlock *)((long)tb1 & ~3);
912 tb2 = tb1->jmp_next[n1];
913 tb_reset_jump(tb1, n1);
914 tb1->jmp_next[n1] = NULL;
915 tb1 = tb2;
916 }
917 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 918
e3db7226 919 tb_phys_invalidate_count++;
9fa3e853
FB
920}
921
922static inline void set_bits(uint8_t *tab, int start, int len)
923{
924 int end, mask, end1;
925
926 end = start + len;
927 tab += start >> 3;
928 mask = 0xff << (start & 7);
929 if ((start & ~7) == (end & ~7)) {
930 if (start < end) {
931 mask &= ~(0xff << (end & 7));
932 *tab |= mask;
933 }
934 } else {
935 *tab++ |= mask;
936 start = (start + 8) & ~7;
937 end1 = end & ~7;
938 while (start < end1) {
939 *tab++ = 0xff;
940 start += 8;
941 }
942 if (start < end) {
943 mask = ~(0xff << (end & 7));
944 *tab |= mask;
945 }
946 }
947}
948
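A worked trace of the helper above on made-up inputs, to show the LSB-first layout used by code_bitmap.

/* set_bits(tab, 3, 7) on a zeroed tab: start = 3, end = 10, which spans
 * two bytes, so the else branch runs:
 *   tab[0] |= 0xff << 3             ->  tab[0] = 0xf8   (bits 3..7)
 *   start = 8, end1 = 8             ->  the middle full-byte loop is skipped
 *   tab[1] |= ~(0xff << (10 & 7))   ->  tab[1] = 0x03   (bits 8..9)
 * Exactly the bits in [3, 10) end up set.
 */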
949static void build_page_bitmap(PageDesc *p)
950{
951 int n, tb_start, tb_end;
952 TranslationBlock *tb;
3b46e624 953
7267c094 954 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
955
956 tb = p->first_tb;
957 while (tb != NULL) {
958 n = (long)tb & 3;
959 tb = (TranslationBlock *)((long)tb & ~3);
960 /* NOTE: this is subtle as a TB may span two physical pages */
961 if (n == 0) {
962 /* NOTE: tb_end may be after the end of the page, but
963 it is not a problem */
964 tb_start = tb->pc & ~TARGET_PAGE_MASK;
965 tb_end = tb_start + tb->size;
966 if (tb_end > TARGET_PAGE_SIZE)
967 tb_end = TARGET_PAGE_SIZE;
968 } else {
969 tb_start = 0;
970 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
971 }
972 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
973 tb = tb->page_next[n];
974 }
975}
976
2e70f6ef
PB
977TranslationBlock *tb_gen_code(CPUState *env,
978 target_ulong pc, target_ulong cs_base,
979 int flags, int cflags)
d720b93d
FB
980{
981 TranslationBlock *tb;
982 uint8_t *tc_ptr;
41c1b1c9
PB
983 tb_page_addr_t phys_pc, phys_page2;
984 target_ulong virt_page2;
d720b93d
FB
985 int code_gen_size;
986
41c1b1c9 987 phys_pc = get_page_addr_code(env, pc);
c27004ec 988 tb = tb_alloc(pc);
d720b93d
FB
989 if (!tb) {
990 /* flush must be done */
991 tb_flush(env);
992 /* cannot fail at this point */
c27004ec 993 tb = tb_alloc(pc);
2e70f6ef
PB
994 /* Don't forget to invalidate previous TB info. */
995 tb_invalidated_flag = 1;
d720b93d
FB
996 }
997 tc_ptr = code_gen_ptr;
998 tb->tc_ptr = tc_ptr;
999 tb->cs_base = cs_base;
1000 tb->flags = flags;
1001 tb->cflags = cflags;
d07bde88 1002 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1003 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1004
d720b93d 1005 /* check next page if needed */
c27004ec 1006 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1007 phys_page2 = -1;
c27004ec 1008 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1009 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1010 }
41c1b1c9 1011 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1012 return tb;
d720b93d 1013}
3b46e624 1014
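For context, a hedged sketch of the usual tb_gen_code() call pattern (cpu-exec.c style); the function and variable names below are illustrative.

/* Sketch only: env is a valid CPUState; mmap-lock handling is omitted. */
static TranslationBlock *example_translate_current(CPUState *env)
{
    target_ulong pc, cs_base;
    int flags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    return tb_gen_code(env, pc, cs_base, flags, 0);   /* 0 = default cflags */
}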
9fa3e853
FB
1015/* invalidate all TBs which intersect with the target physical page
1016 starting in range [start, end). NOTE: start and end must refer to
d720b93d
FB
1017 the same physical page. 'is_cpu_write_access' should be true if called
1018 from a real cpu write access: the virtual CPU will exit the current
1019 TB if code is modified inside this TB. */
41c1b1c9 1020void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1021 int is_cpu_write_access)
1022{
6b917547 1023 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1024 CPUState *env = cpu_single_env;
41c1b1c9 1025 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1026 PageDesc *p;
1027 int n;
1028#ifdef TARGET_HAS_PRECISE_SMC
1029 int current_tb_not_found = is_cpu_write_access;
1030 TranslationBlock *current_tb = NULL;
1031 int current_tb_modified = 0;
1032 target_ulong current_pc = 0;
1033 target_ulong current_cs_base = 0;
1034 int current_flags = 0;
1035#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1036
1037 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1038 if (!p)
9fa3e853 1039 return;
5fafdf24 1040 if (!p->code_bitmap &&
d720b93d
FB
1041 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1042 is_cpu_write_access) {
9fa3e853
FB
1043 /* build code bitmap */
1044 build_page_bitmap(p);
1045 }
1046
1047 /* we remove all the TBs in the range [start, end) */
1048 /* XXX: see if in some cases it could be faster to invalidate all the code */
1049 tb = p->first_tb;
1050 while (tb != NULL) {
1051 n = (long)tb & 3;
1052 tb = (TranslationBlock *)((long)tb & ~3);
1053 tb_next = tb->page_next[n];
1054 /* NOTE: this is subtle as a TB may span two physical pages */
1055 if (n == 0) {
1056 /* NOTE: tb_end may be after the end of the page, but
1057 it is not a problem */
1058 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1059 tb_end = tb_start + tb->size;
1060 } else {
1061 tb_start = tb->page_addr[1];
1062 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1063 }
1064 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1065#ifdef TARGET_HAS_PRECISE_SMC
1066 if (current_tb_not_found) {
1067 current_tb_not_found = 0;
1068 current_tb = NULL;
2e70f6ef 1069 if (env->mem_io_pc) {
d720b93d 1070 /* now we have a real cpu fault */
2e70f6ef 1071 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1072 }
1073 }
1074 if (current_tb == tb &&
2e70f6ef 1075 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1076 /* If we are modifying the current TB, we must stop
1077 its execution. We could be more precise by checking
1078 that the modification is after the current PC, but it
1079 would require a specialized function to partially
1080 restore the CPU state */
3b46e624 1081
d720b93d 1082 current_tb_modified = 1;
618ba8e6 1083 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1084 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1085 &current_flags);
d720b93d
FB
1086 }
1087#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1088 /* we need to do that to handle the case where a signal
1089 occurs while doing tb_phys_invalidate() */
1090 saved_tb = NULL;
1091 if (env) {
1092 saved_tb = env->current_tb;
1093 env->current_tb = NULL;
1094 }
9fa3e853 1095 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1096 if (env) {
1097 env->current_tb = saved_tb;
1098 if (env->interrupt_request && env->current_tb)
1099 cpu_interrupt(env, env->interrupt_request);
1100 }
9fa3e853
FB
1101 }
1102 tb = tb_next;
1103 }
1104#if !defined(CONFIG_USER_ONLY)
1105 /* if no code remaining, no need to continue to use slow writes */
1106 if (!p->first_tb) {
1107 invalidate_page_bitmap(p);
d720b93d 1108 if (is_cpu_write_access) {
2e70f6ef 1109 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1110 }
1111 }
1112#endif
1113#ifdef TARGET_HAS_PRECISE_SMC
1114 if (current_tb_modified) {
1115 /* we generate a block containing just the instruction
1116 modifying the memory. It will ensure that it cannot modify
1117 itself */
ea1c1802 1118 env->current_tb = NULL;
2e70f6ef 1119 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1120 cpu_resume_from_signal(env, NULL);
9fa3e853 1121 }
fd6ce8f6 1122#endif
9fa3e853 1123}
fd6ce8f6 1124
9fa3e853 1125/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1126static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1127{
1128 PageDesc *p;
1129 int offset, b;
59817ccb 1130#if 0
a4193c8a 1131 if (1) {
93fcfe39
AL
1132 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1133 cpu_single_env->mem_io_vaddr, len,
1134 cpu_single_env->eip,
1135 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1136 }
1137#endif
9fa3e853 1138 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1139 if (!p)
9fa3e853
FB
1140 return;
1141 if (p->code_bitmap) {
1142 offset = start & ~TARGET_PAGE_MASK;
1143 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1144 if (b & ((1 << len) - 1))
1145 goto do_invalidate;
1146 } else {
1147 do_invalidate:
d720b93d 1148 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1149 }
1150}
1151
9fa3e853 1152#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1153static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1154 unsigned long pc, void *puc)
9fa3e853 1155{
6b917547 1156 TranslationBlock *tb;
9fa3e853 1157 PageDesc *p;
6b917547 1158 int n;
d720b93d 1159#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1160 TranslationBlock *current_tb = NULL;
d720b93d 1161 CPUState *env = cpu_single_env;
6b917547
AL
1162 int current_tb_modified = 0;
1163 target_ulong current_pc = 0;
1164 target_ulong current_cs_base = 0;
1165 int current_flags = 0;
d720b93d 1166#endif
9fa3e853
FB
1167
1168 addr &= TARGET_PAGE_MASK;
1169 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1170 if (!p)
9fa3e853
FB
1171 return;
1172 tb = p->first_tb;
d720b93d
FB
1173#ifdef TARGET_HAS_PRECISE_SMC
1174 if (tb && pc != 0) {
1175 current_tb = tb_find_pc(pc);
1176 }
1177#endif
9fa3e853
FB
1178 while (tb != NULL) {
1179 n = (long)tb & 3;
1180 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1181#ifdef TARGET_HAS_PRECISE_SMC
1182 if (current_tb == tb &&
2e70f6ef 1183 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1184 /* If we are modifying the current TB, we must stop
1185 its execution. We could be more precise by checking
1186 that the modification is after the current PC, but it
1187 would require a specialized function to partially
1188 restore the CPU state */
3b46e624 1189
d720b93d 1190 current_tb_modified = 1;
618ba8e6 1191 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1192 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1193 &current_flags);
d720b93d
FB
1194 }
1195#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1196 tb_phys_invalidate(tb, addr);
1197 tb = tb->page_next[n];
1198 }
fd6ce8f6 1199 p->first_tb = NULL;
d720b93d
FB
1200#ifdef TARGET_HAS_PRECISE_SMC
1201 if (current_tb_modified) {
1202 /* we generate a block containing just the instruction
1203 modifying the memory. It will ensure that it cannot modify
1204 itself */
ea1c1802 1205 env->current_tb = NULL;
2e70f6ef 1206 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1207 cpu_resume_from_signal(env, puc);
1208 }
1209#endif
fd6ce8f6 1210}
9fa3e853 1211#endif
fd6ce8f6
FB
1212
1213/* add the tb in the target page and protect it if necessary */
5fafdf24 1214static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1215 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1216{
1217 PageDesc *p;
4429ab44
JQ
1218#ifndef CONFIG_USER_ONLY
1219 bool page_already_protected;
1220#endif
9fa3e853
FB
1221
1222 tb->page_addr[n] = page_addr;
5cd2c5b6 1223 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1224 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1225#ifndef CONFIG_USER_ONLY
1226 page_already_protected = p->first_tb != NULL;
1227#endif
9fa3e853
FB
1228 p->first_tb = (TranslationBlock *)((long)tb | n);
1229 invalidate_page_bitmap(p);
fd6ce8f6 1230
107db443 1231#if defined(TARGET_HAS_SMC) || 1
d720b93d 1232
9fa3e853 1233#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1234 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1235 target_ulong addr;
1236 PageDesc *p2;
9fa3e853
FB
1237 int prot;
1238
fd6ce8f6
FB
1239 /* force the host page as non writable (writes will have a
1240 page fault + mprotect overhead) */
53a5960a 1241 page_addr &= qemu_host_page_mask;
fd6ce8f6 1242 prot = 0;
53a5960a
PB
1243 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1244 addr += TARGET_PAGE_SIZE) {
1245
1246 p2 = page_find (addr >> TARGET_PAGE_BITS);
1247 if (!p2)
1248 continue;
1249 prot |= p2->flags;
1250 p2->flags &= ~PAGE_WRITE;
53a5960a 1251 }
5fafdf24 1252 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1253 (prot & PAGE_BITS) & ~PAGE_WRITE);
1254#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1255 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1256 page_addr);
fd6ce8f6 1257#endif
fd6ce8f6 1258 }
9fa3e853
FB
1259#else
1260 /* if some code is already present, then the pages are already
1261 protected. So we handle the case where only the first TB is
1262 allocated in a physical page */
4429ab44 1263 if (!page_already_protected) {
6a00d601 1264 tlb_protect_code(page_addr);
9fa3e853
FB
1265 }
1266#endif
d720b93d
FB
1267
1268#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1269}
1270
9fa3e853
FB
1271/* add a new TB and link it to the physical page tables. phys_page2 is
1272 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1273void tb_link_page(TranslationBlock *tb,
1274 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1275{
9fa3e853
FB
1276 unsigned int h;
1277 TranslationBlock **ptb;
1278
c8a706fe
PB
1279 /* Grab the mmap lock to stop another thread invalidating this TB
1280 before we are done. */
1281 mmap_lock();
9fa3e853
FB
1282 /* add in the physical hash table */
1283 h = tb_phys_hash_func(phys_pc);
1284 ptb = &tb_phys_hash[h];
1285 tb->phys_hash_next = *ptb;
1286 *ptb = tb;
fd6ce8f6
FB
1287
1288 /* add in the page list */
9fa3e853
FB
1289 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1290 if (phys_page2 != -1)
1291 tb_alloc_page(tb, 1, phys_page2);
1292 else
1293 tb->page_addr[1] = -1;
9fa3e853 1294
d4e8164f
FB
1295 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1296 tb->jmp_next[0] = NULL;
1297 tb->jmp_next[1] = NULL;
1298
1299 /* init original jump addresses */
1300 if (tb->tb_next_offset[0] != 0xffff)
1301 tb_reset_jump(tb, 0);
1302 if (tb->tb_next_offset[1] != 0xffff)
1303 tb_reset_jump(tb, 1);
8a40a180
FB
1304
1305#ifdef DEBUG_TB_CHECK
1306 tb_page_check();
1307#endif
c8a706fe 1308 mmap_unlock();
fd6ce8f6
FB
1309}
1310
9fa3e853
FB
1311/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1312 tb[1].tc_ptr. Return NULL if not found */
1313TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1314{
9fa3e853
FB
1315 int m_min, m_max, m;
1316 unsigned long v;
1317 TranslationBlock *tb;
a513fe19
FB
1318
1319 if (nb_tbs <= 0)
1320 return NULL;
1321 if (tc_ptr < (unsigned long)code_gen_buffer ||
1322 tc_ptr >= (unsigned long)code_gen_ptr)
1323 return NULL;
1324 /* binary search (cf Knuth) */
1325 m_min = 0;
1326 m_max = nb_tbs - 1;
1327 while (m_min <= m_max) {
1328 m = (m_min + m_max) >> 1;
1329 tb = &tbs[m];
1330 v = (unsigned long)tb->tc_ptr;
1331 if (v == tc_ptr)
1332 return tb;
1333 else if (tc_ptr < v) {
1334 m_max = m - 1;
1335 } else {
1336 m_min = m + 1;
1337 }
5fafdf24 1338 }
a513fe19
FB
1339 return &tbs[m_max];
1340}
7501267e 1341
ea041c0e
FB
1342static void tb_reset_jump_recursive(TranslationBlock *tb);
1343
1344static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1345{
1346 TranslationBlock *tb1, *tb_next, **ptb;
1347 unsigned int n1;
1348
1349 tb1 = tb->jmp_next[n];
1350 if (tb1 != NULL) {
1351 /* find head of list */
1352 for(;;) {
1353 n1 = (long)tb1 & 3;
1354 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1355 if (n1 == 2)
1356 break;
1357 tb1 = tb1->jmp_next[n1];
1358 }
1359 /* we are now sure that tb jumps to tb1 */
1360 tb_next = tb1;
1361
1362 /* remove tb from the jmp_first list */
1363 ptb = &tb_next->jmp_first;
1364 for(;;) {
1365 tb1 = *ptb;
1366 n1 = (long)tb1 & 3;
1367 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1368 if (n1 == n && tb1 == tb)
1369 break;
1370 ptb = &tb1->jmp_next[n1];
1371 }
1372 *ptb = tb->jmp_next[n];
1373 tb->jmp_next[n] = NULL;
3b46e624 1374
ea041c0e
FB
1375 /* suppress the jump to next tb in generated code */
1376 tb_reset_jump(tb, n);
1377
0124311e 1378 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1379 tb_reset_jump_recursive(tb_next);
1380 }
1381}
1382
1383static void tb_reset_jump_recursive(TranslationBlock *tb)
1384{
1385 tb_reset_jump_recursive2(tb, 0);
1386 tb_reset_jump_recursive2(tb, 1);
1387}
1388
1fddef4b 1389#if defined(TARGET_HAS_ICE)
94df27fd
PB
1390#if defined(CONFIG_USER_ONLY)
1391static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1392{
1393 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1394}
1395#else
d720b93d
FB
1396static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1397{
c227f099 1398 target_phys_addr_t addr;
9b3c35e0 1399 target_ulong pd;
c227f099 1400 ram_addr_t ram_addr;
c2f07f81 1401 PhysPageDesc *p;
d720b93d 1402
c2f07f81
PB
1403 addr = cpu_get_phys_page_debug(env, pc);
1404 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1405 if (!p) {
1406 pd = IO_MEM_UNASSIGNED;
1407 } else {
1408 pd = p->phys_offset;
1409 }
1410 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1411 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1412}
c27004ec 1413#endif
94df27fd 1414#endif /* TARGET_HAS_ICE */
d720b93d 1415
c527ee8f
PB
1416#if defined(CONFIG_USER_ONLY)
1417void cpu_watchpoint_remove_all(CPUState *env, int mask)
1418
1419{
1420}
1421
1422int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1423 int flags, CPUWatchpoint **watchpoint)
1424{
1425 return -ENOSYS;
1426}
1427#else
6658ffb8 1428/* Add a watchpoint. */
a1d1bb31
AL
1429int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1430 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1431{
b4051334 1432 target_ulong len_mask = ~(len - 1);
c0ce998e 1433 CPUWatchpoint *wp;
6658ffb8 1434
b4051334
AL
1435 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1436 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1437 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1438 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1439 return -EINVAL;
1440 }
7267c094 1441 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1442
1443 wp->vaddr = addr;
b4051334 1444 wp->len_mask = len_mask;
a1d1bb31
AL
1445 wp->flags = flags;
1446
2dc9f411 1447 /* keep all GDB-injected watchpoints in front */
c0ce998e 1448 if (flags & BP_GDB)
72cf2d4f 1449 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1450 else
72cf2d4f 1451 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1452
6658ffb8 1453 tlb_flush_page(env, addr);
a1d1bb31
AL
1454
1455 if (watchpoint)
1456 *watchpoint = wp;
1457 return 0;
6658ffb8
PB
1458}
1459
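A hedged usage sketch: the address, length and flags below are arbitrary, and BP_MEM_ACCESS / BP_GDB are assumed to be the flag names from QEMU's breakpoint/watchpoint API.

/* Sketch only; follows the sanity check above: len must be 1, 2, 4 or 8
   and addr must be aligned to len. */
static void example_watch(CPUState *env)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_ACCESS | BP_GDB, &wp) < 0) {
        return;   /* -EINVAL: bad length or misaligned address */
    }
    /* ... later ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}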
a1d1bb31
AL
1460/* Remove a specific watchpoint. */
1461int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1462 int flags)
6658ffb8 1463{
b4051334 1464 target_ulong len_mask = ~(len - 1);
a1d1bb31 1465 CPUWatchpoint *wp;
6658ffb8 1466
72cf2d4f 1467 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1468 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1469 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1470 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1471 return 0;
1472 }
1473 }
a1d1bb31 1474 return -ENOENT;
6658ffb8
PB
1475}
1476
a1d1bb31
AL
1477/* Remove a specific watchpoint by reference. */
1478void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1479{
72cf2d4f 1480 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1481
a1d1bb31
AL
1482 tlb_flush_page(env, watchpoint->vaddr);
1483
7267c094 1484 g_free(watchpoint);
a1d1bb31
AL
1485}
1486
1487/* Remove all matching watchpoints. */
1488void cpu_watchpoint_remove_all(CPUState *env, int mask)
1489{
c0ce998e 1490 CPUWatchpoint *wp, *next;
a1d1bb31 1491
72cf2d4f 1492 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1493 if (wp->flags & mask)
1494 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1495 }
7d03f82f 1496}
c527ee8f 1497#endif
7d03f82f 1498
a1d1bb31
AL
1499/* Add a breakpoint. */
1500int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1501 CPUBreakpoint **breakpoint)
4c3a88a2 1502{
1fddef4b 1503#if defined(TARGET_HAS_ICE)
c0ce998e 1504 CPUBreakpoint *bp;
3b46e624 1505
7267c094 1506 bp = g_malloc(sizeof(*bp));
4c3a88a2 1507
a1d1bb31
AL
1508 bp->pc = pc;
1509 bp->flags = flags;
1510
2dc9f411 1511 /* keep all GDB-injected breakpoints in front */
c0ce998e 1512 if (flags & BP_GDB)
72cf2d4f 1513 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1514 else
72cf2d4f 1515 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1516
d720b93d 1517 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1518
1519 if (breakpoint)
1520 *breakpoint = bp;
4c3a88a2
FB
1521 return 0;
1522#else
a1d1bb31 1523 return -ENOSYS;
4c3a88a2
FB
1524#endif
1525}
1526
a1d1bb31
AL
1527/* Remove a specific breakpoint. */
1528int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1529{
7d03f82f 1530#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1531 CPUBreakpoint *bp;
1532
72cf2d4f 1533 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1534 if (bp->pc == pc && bp->flags == flags) {
1535 cpu_breakpoint_remove_by_ref(env, bp);
1536 return 0;
1537 }
7d03f82f 1538 }
a1d1bb31
AL
1539 return -ENOENT;
1540#else
1541 return -ENOSYS;
7d03f82f
EI
1542#endif
1543}
1544
a1d1bb31
AL
1545/* Remove a specific breakpoint by reference. */
1546void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1547{
1fddef4b 1548#if defined(TARGET_HAS_ICE)
72cf2d4f 1549 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1550
a1d1bb31
AL
1551 breakpoint_invalidate(env, breakpoint->pc);
1552
7267c094 1553 g_free(breakpoint);
a1d1bb31
AL
1554#endif
1555}
1556
1557/* Remove all matching breakpoints. */
1558void cpu_breakpoint_remove_all(CPUState *env, int mask)
1559{
1560#if defined(TARGET_HAS_ICE)
c0ce998e 1561 CPUBreakpoint *bp, *next;
a1d1bb31 1562
72cf2d4f 1563 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1564 if (bp->flags & mask)
1565 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1566 }
4c3a88a2
FB
1567#endif
1568}
1569
c33a346e
FB
1570/* enable or disable single step mode. EXCP_DEBUG is returned by the
1571 CPU loop after each instruction */
1572void cpu_single_step(CPUState *env, int enabled)
1573{
1fddef4b 1574#if defined(TARGET_HAS_ICE)
c33a346e
FB
1575 if (env->singlestep_enabled != enabled) {
1576 env->singlestep_enabled = enabled;
e22a25c9
AL
1577 if (kvm_enabled())
1578 kvm_update_guest_debug(env, 0);
1579 else {
ccbb4d44 1580 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1581 /* XXX: only flush what is necessary */
1582 tb_flush(env);
1583 }
c33a346e
FB
1584 }
1585#endif
1586}
1587
34865134
FB
1588/* enable or disable low-level logging */
1589void cpu_set_log(int log_flags)
1590{
1591 loglevel = log_flags;
1592 if (loglevel && !logfile) {
11fcfab4 1593 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1594 if (!logfile) {
1595 perror(logfilename);
1596 _exit(1);
1597 }
9fa3e853
FB
1598#if !defined(CONFIG_SOFTMMU)
1599 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1600 {
b55266b5 1601 static char logfile_buf[4096];
9fa3e853
FB
1602 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1603 }
daf767b1
SW
1604#elif defined(_WIN32)
1605 /* Win32 doesn't support line-buffering, so use unbuffered output. */
1606 setvbuf(logfile, NULL, _IONBF, 0);
1607#else
34865134 1608 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1609#endif
e735b91c
PB
1610 log_append = 1;
1611 }
1612 if (!loglevel && logfile) {
1613 fclose(logfile);
1614 logfile = NULL;
34865134
FB
1615 }
1616}
1617
1618void cpu_set_log_filename(const char *filename)
1619{
1620 logfilename = strdup(filename);
e735b91c
PB
1621 if (logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
1624 }
1625 cpu_set_log(loglevel);
34865134 1626}
c33a346e 1627
3098dba0 1628static void cpu_unlink_tb(CPUState *env)
ea041c0e 1629{
3098dba0
AJ
1630 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1631 problem and hope the cpu will stop of its own accord. For userspace
1632 emulation this often isn't actually as bad as it sounds. Often
1633 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1634 TranslationBlock *tb;
c227f099 1635 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1636
cab1b4bd 1637 spin_lock(&interrupt_lock);
3098dba0
AJ
1638 tb = env->current_tb;
1639 /* if the cpu is currently executing code, we must unlink it and
1640 all the potentially executing TBs */
f76cfe56 1641 if (tb) {
3098dba0
AJ
1642 env->current_tb = NULL;
1643 tb_reset_jump_recursive(tb);
be214e6c 1644 }
cab1b4bd 1645 spin_unlock(&interrupt_lock);
3098dba0
AJ
1646}
1647
97ffbd8d 1648#ifndef CONFIG_USER_ONLY
3098dba0 1649/* mask must never be zero, except for A20 change call */
ec6959d0 1650static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1651{
1652 int old_mask;
be214e6c 1653
2e70f6ef 1654 old_mask = env->interrupt_request;
68a79315 1655 env->interrupt_request |= mask;
3098dba0 1656
8edac960
AL
1657 /*
1658 * If called from iothread context, wake the target cpu in
1659 * case it's halted.
1660 */
b7680cb6 1661 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1662 qemu_cpu_kick(env);
1663 return;
1664 }
8edac960 1665
2e70f6ef 1666 if (use_icount) {
266910c4 1667 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1668 if (!can_do_io(env)
be214e6c 1669 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1670 cpu_abort(env, "Raised interrupt while not in I/O function");
1671 }
2e70f6ef 1672 } else {
3098dba0 1673 cpu_unlink_tb(env);
ea041c0e
FB
1674 }
1675}
1676
ec6959d0
JK
1677CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1678
97ffbd8d
JK
1679#else /* CONFIG_USER_ONLY */
1680
1681void cpu_interrupt(CPUState *env, int mask)
1682{
1683 env->interrupt_request |= mask;
1684 cpu_unlink_tb(env);
1685}
1686#endif /* CONFIG_USER_ONLY */
1687
b54ad049
FB
1688void cpu_reset_interrupt(CPUState *env, int mask)
1689{
1690 env->interrupt_request &= ~mask;
1691}
1692
3098dba0
AJ
1693void cpu_exit(CPUState *env)
1694{
1695 env->exit_request = 1;
1696 cpu_unlink_tb(env);
1697}
1698
c7cd6a37 1699const CPULogItem cpu_log_items[] = {
5fafdf24 1700 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1701 "show generated host assembly code for each compiled TB" },
1702 { CPU_LOG_TB_IN_ASM, "in_asm",
1703 "show target assembly code for each compiled TB" },
5fafdf24 1704 { CPU_LOG_TB_OP, "op",
57fec1fe 1705 "show micro ops for each compiled TB" },
f193c797 1706 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1707 "show micro ops "
1708#ifdef TARGET_I386
1709 "before eflags optimization and "
f193c797 1710#endif
e01a1157 1711 "after liveness analysis" },
f193c797
FB
1712 { CPU_LOG_INT, "int",
1713 "show interrupts/exceptions in short format" },
1714 { CPU_LOG_EXEC, "exec",
1715 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1716 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1717 "show CPU state before block translation" },
f193c797
FB
1718#ifdef TARGET_I386
1719 { CPU_LOG_PCALL, "pcall",
1720 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1721 { CPU_LOG_RESET, "cpu_reset",
1722 "show CPU state before CPU resets" },
f193c797 1723#endif
8e3a9fd2 1724#ifdef DEBUG_IOPORT
fd872598
FB
1725 { CPU_LOG_IOPORT, "ioport",
1726 "show all i/o ports accesses" },
8e3a9fd2 1727#endif
f193c797
FB
1728 { 0, NULL, NULL },
1729};
1730
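For illustration, the names in this table are what cpu_str_to_log_mask() (further below) accepts as a comma-separated list; a sketch of typical "-d" option handling follows, with a made-up option string.

/* Sketch only: error handling kept minimal. */
static void example_set_log(const char *optarg_str)
{
    int mask = cpu_str_to_log_mask(optarg_str);   /* e.g. "in_asm,exec" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", optarg_str);
        return;
    }
    cpu_set_log(mask);   /* opens logfilename and stores loglevel */
}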
f6f3fbca
MT
1731#ifndef CONFIG_USER_ONLY
1732static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1733 = QLIST_HEAD_INITIALIZER(memory_client_list);
1734
1735static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1736 ram_addr_t size,
0fd542fb
MT
1737 ram_addr_t phys_offset,
1738 bool log_dirty)
f6f3fbca
MT
1739{
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1742 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1743 }
1744}
1745
1746static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1747 target_phys_addr_t end)
f6f3fbca
MT
1748{
1749 CPUPhysMemoryClient *client;
1750 QLIST_FOREACH(client, &memory_client_list, list) {
1751 int r = client->sync_dirty_bitmap(client, start, end);
1752 if (r < 0)
1753 return r;
1754 }
1755 return 0;
1756}
1757
1758static int cpu_notify_migration_log(int enable)
1759{
1760 CPUPhysMemoryClient *client;
1761 QLIST_FOREACH(client, &memory_client_list, list) {
1762 int r = client->migration_log(client, enable);
1763 if (r < 0)
1764 return r;
1765 }
1766 return 0;
1767}
1768
2173a75f
AW
1769struct last_map {
1770 target_phys_addr_t start_addr;
1771 ram_addr_t size;
1772 ram_addr_t phys_offset;
1773};
1774
8d4c78e7
AW
1775/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1776 * address. Each intermediate table provides the next L2_BITs of guest
 1777 * physical address space. The number of levels varies based on host and
1778 * guest configuration, making it efficient to build the final guest
1779 * physical address by seeding the L1 offset and shifting and adding in
1780 * each L2 offset as we recurse through them. */
2173a75f
AW
1781static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1782 void **lp, target_phys_addr_t addr,
1783 struct last_map *map)
f6f3fbca 1784{
5cd2c5b6 1785 int i;
f6f3fbca 1786
5cd2c5b6
RH
1787 if (*lp == NULL) {
1788 return;
1789 }
1790 if (level == 0) {
1791 PhysPageDesc *pd = *lp;
8d4c78e7 1792 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1793 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1794 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1795 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1796
1797 if (map->size &&
1798 start_addr == map->start_addr + map->size &&
1799 pd[i].phys_offset == map->phys_offset + map->size) {
1800
1801 map->size += TARGET_PAGE_SIZE;
1802 continue;
1803 } else if (map->size) {
1804 client->set_memory(client, map->start_addr,
1805 map->size, map->phys_offset, false);
1806 }
1807
1808 map->start_addr = start_addr;
1809 map->size = TARGET_PAGE_SIZE;
1810 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1811 }
5cd2c5b6
RH
1812 }
1813 } else {
1814 void **pp = *lp;
7296abac 1815 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1816 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1817 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1818 }
1819 }
1820}
1821
1822static void phys_page_for_each(CPUPhysMemoryClient *client)
1823{
5cd2c5b6 1824 int i;
2173a75f
AW
1825 struct last_map map = { };
1826
5cd2c5b6
RH
1827 for (i = 0; i < P_L1_SIZE; ++i) {
1828 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1829 l1_phys_map + i, i, &map);
1830 }
1831 if (map.size) {
1832 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1833 false);
f6f3fbca 1834 }
f6f3fbca
MT
1835}
1836
1837void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1838{
1839 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1840 phys_page_for_each(client);
1841}
1842
1843void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1844{
1845 QLIST_REMOVE(client, list);
1846}
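/*
 * Illustrative sketch (not part of the original file): a minimal
 * CPUPhysMemoryClient that just traces mappings.  The field names and
 * callback signatures are inferred from the cpu_notify_* calls above; the
 * authoritative struct lives in cpu-common.h, so treat this as an example
 * under those assumptions.  set_memory, sync_dirty_bitmap and migration_log
 * are invoked unconditionally, so all three must be provided; log_start and
 * log_stop are optional (checked for NULL before use).
 */
static void example_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size, ram_addr_t phys_offset,
                                      bool log_dirty)
{
    fprintf(stderr, "phys map: " TARGET_FMT_plx " size %" PRIu64
            " offset %" PRIu64 "\n",
            start_addr, (uint64_t)size, (uint64_t)phys_offset);
}

static int example_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;   /* nothing to synchronize in this sketch */
}

static int example_client_migration_log(CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;   /* dirty logging not needed in this sketch */
}

static CPUPhysMemoryClient example_client = {
    .set_memory        = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log     = example_client_migration_log,
};

/* Calling cpu_register_phys_memory_client(&example_client) would replay all
   current mappings through example_client_set_memory and keep the client
   informed of later changes until cpu_unregister_phys_memory_client(). */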
1847#endif
1848
f193c797
FB
1849static int cmp1(const char *s1, int n, const char *s2)
1850{
1851 if (strlen(s2) != n)
1852 return 0;
1853 return memcmp(s1, s2, n) == 0;
1854}
3b46e624 1855
f193c797
FB
1856/* takes a comma separated list of log masks. Return 0 if error. */
1857int cpu_str_to_log_mask(const char *str)
1858{
c7cd6a37 1859 const CPULogItem *item;
f193c797
FB
1860 int mask;
1861 const char *p, *p1;
1862
1863 p = str;
1864 mask = 0;
1865 for(;;) {
1866 p1 = strchr(p, ',');
1867 if (!p1)
1868 p1 = p + strlen(p);
9742bf26
YT
1869 if(cmp1(p,p1-p,"all")) {
1870 for(item = cpu_log_items; item->mask != 0; item++) {
1871 mask |= item->mask;
1872 }
1873 } else {
1874 for(item = cpu_log_items; item->mask != 0; item++) {
1875 if (cmp1(p, p1 - p, item->name))
1876 goto found;
1877 }
1878 return 0;
f193c797 1879 }
f193c797
FB
1880 found:
1881 mask |= item->mask;
1882 if (*p1 != ',')
1883 break;
1884 p = p1 + 1;
1885 }
1886 return mask;
1887}
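/*
 * Illustrative sketch (not part of the original file): turning a "-d" style
 * option string into a log mask.  The item names come from cpu_log_items
 * above; cpu_set_log() is assumed to be available elsewhere in this file.
 */
static void example_enable_logging(const char *opt)
{
    int mask = cpu_str_to_log_mask(opt);   /* e.g. opt = "in_asm,exec" */

    if (mask == 0) {
        fprintf(stderr, "unknown -d item in '%s'\n", opt);
        return;
    }
    cpu_set_log(mask);                     /* assumed helper, see above */
}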
ea041c0e 1888
7501267e
FB
1889void cpu_abort(CPUState *env, const char *fmt, ...)
1890{
1891 va_list ap;
493ae1f0 1892 va_list ap2;
7501267e
FB
1893
1894 va_start(ap, fmt);
493ae1f0 1895 va_copy(ap2, ap);
7501267e
FB
1896 fprintf(stderr, "qemu: fatal: ");
1897 vfprintf(stderr, fmt, ap);
1898 fprintf(stderr, "\n");
1899#ifdef TARGET_I386
7fe48483
FB
1900 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1901#else
1902 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1903#endif
93fcfe39
AL
1904 if (qemu_log_enabled()) {
1905 qemu_log("qemu: fatal: ");
1906 qemu_log_vprintf(fmt, ap2);
1907 qemu_log("\n");
f9373291 1908#ifdef TARGET_I386
93fcfe39 1909 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1910#else
93fcfe39 1911 log_cpu_state(env, 0);
f9373291 1912#endif
31b1a7b4 1913 qemu_log_flush();
93fcfe39 1914 qemu_log_close();
924edcae 1915 }
493ae1f0 1916 va_end(ap2);
f9373291 1917 va_end(ap);
fd052bf6
RV
1918#if defined(CONFIG_USER_ONLY)
1919 {
1920 struct sigaction act;
1921 sigfillset(&act.sa_mask);
1922 act.sa_handler = SIG_DFL;
1923 sigaction(SIGABRT, &act, NULL);
1924 }
1925#endif
7501267e
FB
1926 abort();
1927}
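/*
 * Usage note (illustrative, not part of the original file): cpu_abort()
 * takes a printf-style format, dumps the CPU state and the log, and never
 * returns.  A typical call from a target helper looks like
 *
 *     cpu_abort(env, "unhandled opcode 0x%08x at pc " TARGET_FMT_lx,
 *               insn, pc);
 *
 * where "insn" and "pc" are hypothetical locals; the relevant fields differ
 * per target.
 */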
1928
c5be9f08
TS
1929CPUState *cpu_copy(CPUState *env)
1930{
01ba9816 1931 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1932 CPUState *next_cpu = new_env->next_cpu;
1933 int cpu_index = new_env->cpu_index;
5a38f081
AL
1934#if defined(TARGET_HAS_ICE)
1935 CPUBreakpoint *bp;
1936 CPUWatchpoint *wp;
1937#endif
1938
c5be9f08 1939 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1940
1941 /* Preserve chaining and index. */
c5be9f08
TS
1942 new_env->next_cpu = next_cpu;
1943 new_env->cpu_index = cpu_index;
5a38f081
AL
1944
1945 /* Clone all break/watchpoints.
1946 Note: Once we support ptrace with hw-debug register access, make sure
1947 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1948 QTAILQ_INIT(&env->breakpoints);
1949 QTAILQ_INIT(&env->watchpoints);
5a38f081 1950#if defined(TARGET_HAS_ICE)
72cf2d4f 1951 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1952 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1953 }
72cf2d4f 1954 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1955 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1956 wp->flags, NULL);
1957 }
1958#endif
1959
c5be9f08
TS
1960 return new_env;
1961}
1962
0124311e
FB
1963#if !defined(CONFIG_USER_ONLY)
1964
5c751e99
EI
1965static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1966{
1967 unsigned int i;
1968
1969 /* Discard jump cache entries for any tb which might potentially
1970 overlap the flushed page. */
1971 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1972 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1973 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1974
1975 i = tb_jmp_cache_hash_page(addr);
1976 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1977 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1978}
1979
08738984
IK
1980static CPUTLBEntry s_cputlb_empty_entry = {
1981 .addr_read = -1,
1982 .addr_write = -1,
1983 .addr_code = -1,
1984 .addend = -1,
1985};
1986
ee8b7021
FB
1987/* NOTE: if flush_global is true, also flush global entries (not
1988 implemented yet) */
1989void tlb_flush(CPUState *env, int flush_global)
33417e70 1990{
33417e70 1991 int i;
0124311e 1992
9fa3e853
FB
1993#if defined(DEBUG_TLB)
1994 printf("tlb_flush:\n");
1995#endif
0124311e
FB
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
1999
33417e70 2000 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
2001 int mmu_idx;
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 2003 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 2004 }
33417e70 2005 }
9fa3e853 2006
8a40a180 2007 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 2008
d4c430a8
PB
2009 env->tlb_flush_addr = -1;
2010 env->tlb_flush_mask = 0;
e3db7226 2011 tlb_flush_count++;
33417e70
FB
2012}
2013
274da6b2 2014static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2015{
5fafdf24 2016 if (addr == (tlb_entry->addr_read &
84b7b8e7 2017 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2018 addr == (tlb_entry->addr_write &
84b7b8e7 2019 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2020 addr == (tlb_entry->addr_code &
84b7b8e7 2021 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2022 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2023 }
61382a50
FB
2024}
2025
2e12669a 2026void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2027{
8a40a180 2028 int i;
cfde4bd9 2029 int mmu_idx;
0124311e 2030
9fa3e853 2031#if defined(DEBUG_TLB)
108c49b8 2032 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2033#endif
d4c430a8
PB
2034 /* Check if we need to flush due to large pages. */
2035 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2036#if defined(DEBUG_TLB)
2037 printf("tlb_flush_page: forced full flush ("
2038 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2039 env->tlb_flush_addr, env->tlb_flush_mask);
2040#endif
2041 tlb_flush(env, 1);
2042 return;
2043 }
0124311e
FB
2044 /* must reset current TB so that interrupts cannot modify the
2045 links while we are modifying them */
2046 env->current_tb = NULL;
61382a50
FB
2047
2048 addr &= TARGET_PAGE_MASK;
2049 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2052
5c751e99 2053 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2054}
2055
9fa3e853
FB
2056/* update the TLBs so that writes to code in the virtual page 'addr'
2057 can be detected */
c227f099 2058static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2059{
5fafdf24 2060 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2061 ram_addr + TARGET_PAGE_SIZE,
2062 CODE_DIRTY_FLAG);
9fa3e853
FB
2063}
2064
9fa3e853 2065/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2066 tested for self modifying code */
c227f099 2067static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2068 target_ulong vaddr)
9fa3e853 2069{
f7c11b53 2070 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2071}
2072
5fafdf24 2073static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2074 unsigned long start, unsigned long length)
2075{
2076 unsigned long addr;
84b7b8e7
FB
2077 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2078 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2079 if ((addr - start) < length) {
0f459d16 2080 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2081 }
2082 }
2083}
2084
5579c7f3 2085/* Note: start and end must be within the same ram block. */
c227f099 2086void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2087 int dirty_flags)
1ccde1cb
FB
2088{
2089 CPUState *env;
4f2ac237 2090 unsigned long length, start1;
f7c11b53 2091 int i;
1ccde1cb
FB
2092
2093 start &= TARGET_PAGE_MASK;
2094 end = TARGET_PAGE_ALIGN(end);
2095
2096 length = end - start;
2097 if (length == 0)
2098 return;
f7c11b53 2099 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2100
1ccde1cb
FB
2101 /* we modify the TLB cache so that the dirty bit will be set again
2102 when accessing the range */
b2e0a138 2103 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2104 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2105 address comparisons below. */
b2e0a138 2106 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2107 != (end - 1) - start) {
2108 abort();
2109 }
2110
6a00d601 2111 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2112 int mmu_idx;
2113 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2116 start1, length);
2117 }
6a00d601 2118 }
1ccde1cb
FB
2119}
2120
74576198
AL
2121int cpu_physical_memory_set_dirty_tracking(int enable)
2122{
f6f3fbca 2123 int ret = 0;
74576198 2124 in_migration = enable;
f6f3fbca
MT
2125 ret = cpu_notify_migration_log(!!enable);
2126 return ret;
74576198
AL
2127}
2128
2129int cpu_physical_memory_get_dirty_tracking(void)
2130{
2131 return in_migration;
2132}
2133
c227f099
AL
2134int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2135 target_phys_addr_t end_addr)
2bec46dc 2136{
7b8f3b78 2137 int ret;
151f7749 2138
f6f3fbca 2139 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2140 return ret;
2bec46dc
AL
2141}
2142
e5896b12
AP
2143int cpu_physical_log_start(target_phys_addr_t start_addr,
2144 ram_addr_t size)
2145{
2146 CPUPhysMemoryClient *client;
2147 QLIST_FOREACH(client, &memory_client_list, list) {
2148 if (client->log_start) {
2149 int r = client->log_start(client, start_addr, size);
2150 if (r < 0) {
2151 return r;
2152 }
2153 }
2154 }
2155 return 0;
2156}
2157
2158int cpu_physical_log_stop(target_phys_addr_t start_addr,
2159 ram_addr_t size)
2160{
2161 CPUPhysMemoryClient *client;
2162 QLIST_FOREACH(client, &memory_client_list, list) {
2163 if (client->log_stop) {
2164 int r = client->log_stop(client, start_addr, size);
2165 if (r < 0) {
2166 return r;
2167 }
2168 }
2169 }
2170 return 0;
2171}
2172
3a7d929e
FB
2173static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2174{
c227f099 2175 ram_addr_t ram_addr;
5579c7f3 2176 void *p;
3a7d929e 2177
84b7b8e7 2178 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2179 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2180 + tlb_entry->addend);
e890261f 2181 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2182 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2183 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2184 }
2185 }
2186}
2187
2188/* update the TLB according to the current state of the dirty bits */
2189void cpu_tlb_update_dirty(CPUState *env)
2190{
2191 int i;
cfde4bd9
IY
2192 int mmu_idx;
2193 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2194 for(i = 0; i < CPU_TLB_SIZE; i++)
2195 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2196 }
3a7d929e
FB
2197}
2198
0f459d16 2199static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2200{
0f459d16
PB
2201 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2202 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2203}
2204
0f459d16
PB
2205/* update the TLB corresponding to virtual page vaddr
2206 so that it is no longer dirty */
2207static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2208{
1ccde1cb 2209 int i;
cfde4bd9 2210 int mmu_idx;
1ccde1cb 2211
0f459d16 2212 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2213 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2214 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2215 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2216}
2217
d4c430a8
PB
2218/* Our TLB does not support large pages, so remember the area covered by
2219 large pages and trigger a full TLB flush if these are invalidated. */
2220static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2221 target_ulong size)
2222{
2223 target_ulong mask = ~(size - 1);
2224
2225 if (env->tlb_flush_addr == (target_ulong)-1) {
2226 env->tlb_flush_addr = vaddr & mask;
2227 env->tlb_flush_mask = mask;
2228 return;
2229 }
2230 /* Extend the existing region to include the new page.
2231 This is a compromise between unnecessary flushes and the cost
2232 of maintaining a full variable size TLB. */
2233 mask &= env->tlb_flush_mask;
2234 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2235 mask <<= 1;
2236 }
2237 env->tlb_flush_addr &= mask;
2238 env->tlb_flush_mask = mask;
2239}
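/*
 * Worked example (illustrative, not part of the original file), assuming a
 * 4 KB TARGET_PAGE_SIZE and 32-bit addresses:
 *
 *   1st large page: vaddr 0x00200000, size 2 MB
 *        -> tlb_flush_addr = 0x00200000, tlb_flush_mask = 0xffe00000
 *   2nd large page: vaddr 0x00800000, size 2 MB
 *        -> the differing bits are 0x00a00000, so the mask is widened
 *           (0xffe00000 -> 0xffc00000 -> 0xff800000 -> 0xff000000)
 *           until it covers them, giving
 *           tlb_flush_addr = 0x00000000, tlb_flush_mask = 0xff000000
 *
 * Any tlb_flush_page() inside [0x00000000, 0x01000000) now forces a full
 * flush, which keeps the bookkeeping to a single (addr, mask) pair.
 */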
2240
2241/* Add a new TLB entry. At most one entry for a given virtual address
2242 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2243 supplied size is only used by tlb_flush_page. */
2244void tlb_set_page(CPUState *env, target_ulong vaddr,
2245 target_phys_addr_t paddr, int prot,
2246 int mmu_idx, target_ulong size)
9fa3e853 2247{
92e873b9 2248 PhysPageDesc *p;
4f2ac237 2249 unsigned long pd;
9fa3e853 2250 unsigned int index;
4f2ac237 2251 target_ulong address;
0f459d16 2252 target_ulong code_address;
355b1943 2253 unsigned long addend;
84b7b8e7 2254 CPUTLBEntry *te;
a1d1bb31 2255 CPUWatchpoint *wp;
c227f099 2256 target_phys_addr_t iotlb;
9fa3e853 2257
d4c430a8
PB
2258 assert(size >= TARGET_PAGE_SIZE);
2259 if (size != TARGET_PAGE_SIZE) {
2260 tlb_add_large_page(env, vaddr, size);
2261 }
92e873b9 2262 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2263 if (!p) {
2264 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2265 } else {
2266 pd = p->phys_offset;
9fa3e853
FB
2267 }
2268#if defined(DEBUG_TLB)
7fd3f494
SW
2269 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2270 " prot=%x idx=%d pd=0x%08lx\n",
2271 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2272#endif
2273
0f459d16
PB
2274 address = vaddr;
2275 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2276 /* IO memory case (romd handled later) */
2277 address |= TLB_MMIO;
2278 }
5579c7f3 2279 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2281 /* Normal RAM. */
2282 iotlb = pd & TARGET_PAGE_MASK;
2283 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2284 iotlb |= IO_MEM_NOTDIRTY;
2285 else
2286 iotlb |= IO_MEM_ROM;
2287 } else {
ccbb4d44 2288 /* IO handlers are currently passed a physical address.
0f459d16
PB
2289 It would be nice to pass an offset from the base address
2290 of that region. This would avoid having to special case RAM,
2291 and avoid full address decoding in every device.
2292 We can't use the high bits of pd for this because
2293 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2294 iotlb = (pd & ~TARGET_PAGE_MASK);
2295 if (p) {
8da3ff18
PB
2296 iotlb += p->region_offset;
2297 } else {
2298 iotlb += paddr;
2299 }
0f459d16
PB
2300 }
2301
2302 code_address = address;
2303 /* Make accesses to pages with watchpoints go via the
2304 watchpoint trap routines. */
72cf2d4f 2305 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2306 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2307 /* Avoid trapping reads of pages with a write breakpoint. */
2308 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2309 iotlb = io_mem_watch + paddr;
2310 address |= TLB_MMIO;
2311 break;
2312 }
6658ffb8 2313 }
0f459d16 2314 }
d79acba4 2315
0f459d16
PB
2316 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2317 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2318 te = &env->tlb_table[mmu_idx][index];
2319 te->addend = addend - vaddr;
2320 if (prot & PAGE_READ) {
2321 te->addr_read = address;
2322 } else {
2323 te->addr_read = -1;
2324 }
5c751e99 2325
0f459d16
PB
2326 if (prot & PAGE_EXEC) {
2327 te->addr_code = code_address;
2328 } else {
2329 te->addr_code = -1;
2330 }
2331 if (prot & PAGE_WRITE) {
2332 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2333 (pd & IO_MEM_ROMD)) {
2334 /* Write access calls the I/O callback. */
2335 te->addr_write = address | TLB_MMIO;
2336 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2337 !cpu_physical_memory_is_dirty(pd)) {
2338 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2339 } else {
0f459d16 2340 te->addr_write = address;
9fa3e853 2341 }
0f459d16
PB
2342 } else {
2343 te->addr_write = -1;
9fa3e853 2344 }
9fa3e853
FB
2345}
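/*
 * Summary (illustrative, not part of the original file) of how the entry
 * built above encodes its state in the low bits of addr_read/addr_write/
 * addr_code:
 *
 *   plain RAM, clean page  -> addr_write = vaddr | TLB_NOTDIRTY
 *                             (writes go through the notdirty handlers below)
 *   plain RAM, dirty page  -> addr_write = vaddr (direct host access via the
 *                             addend field)
 *   ROM / ROMD region      -> addr_write = vaddr | TLB_MMIO (writes hit the
 *                             I/O callback)
 *   MMIO region            -> every used addr_* field carries TLB_MMIO
 *   watchpointed page      -> address marked TLB_MMIO so accesses funnel
 *                             through the watch_mem_* handlers
 */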
2346
0124311e
FB
2347#else
2348
ee8b7021 2349void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2350{
2351}
2352
2e12669a 2353void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2354{
2355}
2356
edf8e2af
MW
2357/*
2358 * Walks guest process memory "regions" one by one
2359 * and calls callback function 'fn' for each region.
2360 */
5cd2c5b6
RH
2361
2362struct walk_memory_regions_data
2363{
2364 walk_memory_regions_fn fn;
2365 void *priv;
2366 unsigned long start;
2367 int prot;
2368};
2369
2370static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2371 abi_ulong end, int new_prot)
5cd2c5b6
RH
2372{
2373 if (data->start != -1ul) {
2374 int rc = data->fn(data->priv, data->start, end, data->prot);
2375 if (rc != 0) {
2376 return rc;
2377 }
2378 }
2379
2380 data->start = (new_prot ? end : -1ul);
2381 data->prot = new_prot;
2382
2383 return 0;
2384}
2385
2386static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2387 abi_ulong base, int level, void **lp)
5cd2c5b6 2388{
b480d9b7 2389 abi_ulong pa;
5cd2c5b6
RH
2390 int i, rc;
2391
2392 if (*lp == NULL) {
2393 return walk_memory_regions_end(data, base, 0);
2394 }
2395
2396 if (level == 0) {
2397 PageDesc *pd = *lp;
7296abac 2398 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2399 int prot = pd[i].flags;
2400
2401 pa = base | (i << TARGET_PAGE_BITS);
2402 if (prot != data->prot) {
2403 rc = walk_memory_regions_end(data, pa, prot);
2404 if (rc != 0) {
2405 return rc;
9fa3e853 2406 }
9fa3e853 2407 }
5cd2c5b6
RH
2408 }
2409 } else {
2410 void **pp = *lp;
7296abac 2411 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2412 pa = base | ((abi_ulong)i <<
2413 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2414 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2415 if (rc != 0) {
2416 return rc;
2417 }
2418 }
2419 }
2420
2421 return 0;
2422}
2423
2424int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2425{
2426 struct walk_memory_regions_data data;
2427 unsigned long i;
2428
2429 data.fn = fn;
2430 data.priv = priv;
2431 data.start = -1ul;
2432 data.prot = 0;
2433
2434 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2435 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2436 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2437 if (rc != 0) {
2438 return rc;
9fa3e853 2439 }
33417e70 2440 }
5cd2c5b6
RH
2441
2442 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2443}
2444
b480d9b7
PB
2445static int dump_region(void *priv, abi_ulong start,
2446 abi_ulong end, unsigned long prot)
edf8e2af
MW
2447{
2448 FILE *f = (FILE *)priv;
2449
b480d9b7
PB
2450 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2451 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2452 start, end, end - start,
2453 ((prot & PAGE_READ) ? 'r' : '-'),
2454 ((prot & PAGE_WRITE) ? 'w' : '-'),
2455 ((prot & PAGE_EXEC) ? 'x' : '-'));
2456
2457 return (0);
2458}
2459
2460/* dump memory mappings */
2461void page_dump(FILE *f)
2462{
2463 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2464 "start", "end", "size", "prot");
2465 walk_memory_regions(f, dump_region);
33417e70
FB
2466}
2467
53a5960a 2468int page_get_flags(target_ulong address)
33417e70 2469{
9fa3e853
FB
2470 PageDesc *p;
2471
2472 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2473 if (!p)
9fa3e853
FB
2474 return 0;
2475 return p->flags;
2476}
2477
376a7909
RH
2478/* Modify the flags of a page and invalidate the code if necessary.
2479 The flag PAGE_WRITE_ORG is positioned automatically depending
2480 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2481void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2482{
376a7909
RH
2483 target_ulong addr, len;
2484
2485 /* This function should never be called with addresses outside the
2486 guest address space. If this assert fires, it probably indicates
2487 a missing call to h2g_valid. */
b480d9b7
PB
2488#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2489 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2490#endif
2491 assert(start < end);
9fa3e853
FB
2492
2493 start = start & TARGET_PAGE_MASK;
2494 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2495
2496 if (flags & PAGE_WRITE) {
9fa3e853 2497 flags |= PAGE_WRITE_ORG;
376a7909
RH
2498 }
2499
2500 for (addr = start, len = end - start;
2501 len != 0;
2502 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2503 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2504
2505 /* If the write protection bit is set, then we invalidate
2506 the code inside. */
5fafdf24 2507 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2508 (flags & PAGE_WRITE) &&
2509 p->first_tb) {
d720b93d 2510 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2511 }
2512 p->flags = flags;
2513 }
33417e70
FB
2514}
2515
3d97b40b
TS
2516int page_check_range(target_ulong start, target_ulong len, int flags)
2517{
2518 PageDesc *p;
2519 target_ulong end;
2520 target_ulong addr;
2521
376a7909
RH
2522 /* This function should never be called with addresses outside the
2523 guest address space. If this assert fires, it probably indicates
2524 a missing call to h2g_valid. */
338e9e6c
BS
2525#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2526 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2527#endif
2528
3e0650a9
RH
2529 if (len == 0) {
2530 return 0;
2531 }
376a7909
RH
2532 if (start + len - 1 < start) {
2533 /* We've wrapped around. */
55f280c9 2534 return -1;
376a7909 2535 }
55f280c9 2536
3d97b40b
TS
2537 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2538 start = start & TARGET_PAGE_MASK;
2539
376a7909
RH
2540 for (addr = start, len = end - start;
2541 len != 0;
2542 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2543 p = page_find(addr >> TARGET_PAGE_BITS);
2544 if( !p )
2545 return -1;
2546 if( !(p->flags & PAGE_VALID) )
2547 return -1;
2548
dae3270c 2549 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2550 return -1;
dae3270c
FB
2551 if (flags & PAGE_WRITE) {
2552 if (!(p->flags & PAGE_WRITE_ORG))
2553 return -1;
2554 /* unprotect the page if it was put read-only because it
2555 contains translated code */
2556 if (!(p->flags & PAGE_WRITE)) {
2557 if (!page_unprotect(addr, 0, NULL))
2558 return -1;
2559 }
2560 return 0;
2561 }
3d97b40b
TS
2562 }
2563 return 0;
2564}
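/*
 * Illustrative sketch (not part of the original file): a typical caller in
 * user-mode emulation validates a guest buffer before touching it.  The
 * guest_addr/len names are hypothetical.
 */
static int example_validate_guest_buffer(target_ulong guest_addr,
                                         target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ | PAGE_WRITE) < 0) {
        return -1;   /* a syscall handler would typically return -EFAULT */
    }
    return 0;
}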
2565
9fa3e853 2566/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2567 page. Return TRUE if the fault was successfully handled. */
53a5960a 2568int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2569{
45d679d6
AJ
2570 unsigned int prot;
2571 PageDesc *p;
53a5960a 2572 target_ulong host_start, host_end, addr;
9fa3e853 2573
c8a706fe
PB
2574 /* Technically this isn't safe inside a signal handler. However we
2575 know this only ever happens in a synchronous SEGV handler, so in
2576 practice it seems to be ok. */
2577 mmap_lock();
2578
45d679d6
AJ
2579 p = page_find(address >> TARGET_PAGE_BITS);
2580 if (!p) {
c8a706fe 2581 mmap_unlock();
9fa3e853 2582 return 0;
c8a706fe 2583 }
45d679d6 2584
9fa3e853
FB
2585 /* if the page was really writable, then we change its
2586 protection back to writable */
45d679d6
AJ
2587 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2588 host_start = address & qemu_host_page_mask;
2589 host_end = host_start + qemu_host_page_size;
2590
2591 prot = 0;
2592 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2593 p = page_find(addr >> TARGET_PAGE_BITS);
2594 p->flags |= PAGE_WRITE;
2595 prot |= p->flags;
2596
9fa3e853
FB
2597 /* and since the content will be modified, we must invalidate
2598 the corresponding translated code. */
45d679d6 2599 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2600#ifdef DEBUG_TB_CHECK
45d679d6 2601 tb_invalidate_check(addr);
9fa3e853 2602#endif
9fa3e853 2603 }
45d679d6
AJ
2604 mprotect((void *)g2h(host_start), qemu_host_page_size,
2605 prot & PAGE_BITS);
2606
2607 mmap_unlock();
2608 return 1;
9fa3e853 2609 }
c8a706fe 2610 mmap_unlock();
9fa3e853
FB
2611 return 0;
2612}
2613
6a00d601
FB
2614static inline void tlb_set_dirty(CPUState *env,
2615 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2616{
2617}
9fa3e853
FB
2618#endif /* defined(CONFIG_USER_ONLY) */
2619
e2eef170 2620#if !defined(CONFIG_USER_ONLY)
8da3ff18 2621
c04b2b78
PB
2622#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2623typedef struct subpage_t {
2624 target_phys_addr_t base;
f6405247
RH
2625 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2626 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2627} subpage_t;
2628
c227f099
AL
2629static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2630 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2631static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2632 ram_addr_t orig_memory,
2633 ram_addr_t region_offset);
db7b5426
BS
2634#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2635 need_subpage) \
2636 do { \
2637 if (addr > start_addr) \
2638 start_addr2 = 0; \
2639 else { \
2640 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2641 if (start_addr2 > 0) \
2642 need_subpage = 1; \
2643 } \
2644 \
49e9fba2 2645 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2646 end_addr2 = TARGET_PAGE_SIZE - 1; \
2647 else { \
2648 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2649 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2650 need_subpage = 1; \
2651 } \
2652 } while (0)
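/*
 * Worked example (illustrative, not part of the original file), with a 4 KB
 * TARGET_PAGE_SIZE: registering start_addr = 0x1200, orig_size = 0x600 and
 * looking at the page addr = 0x1000 gives
 *
 *     start_addr2  = 0x200   (the registration starts 0x200 into the page)
 *     end_addr2    = 0x7ff   (it ends 0x7ff into the page)
 *     need_subpage = 1       (the page is only partially covered)
 *
 * so that page is handed to subpage_register() instead of being mapped as a
 * whole page.
 */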
2653
8f2498f9
MT
2654/* register physical memory.
2655 For RAM, 'size' must be a multiple of the target page size.
2656 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2657 io memory page. The address used when calling the IO function is
2658 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2659 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2660 before calculating this offset. This should not be a problem unless
2661 the low bits of start_addr and region_offset differ. */
0fd542fb 2662void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2663 ram_addr_t size,
2664 ram_addr_t phys_offset,
0fd542fb
MT
2665 ram_addr_t region_offset,
2666 bool log_dirty)
33417e70 2667{
c227f099 2668 target_phys_addr_t addr, end_addr;
92e873b9 2669 PhysPageDesc *p;
9d42037b 2670 CPUState *env;
c227f099 2671 ram_addr_t orig_size = size;
f6405247 2672 subpage_t *subpage;
33417e70 2673
3b8e6a2d 2674 assert(size);
0fd542fb 2675 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2676
67c4d23c
PB
2677 if (phys_offset == IO_MEM_UNASSIGNED) {
2678 region_offset = start_addr;
2679 }
8da3ff18 2680 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2681 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2682 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2683
2684 addr = start_addr;
2685 do {
db7b5426
BS
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2688 ram_addr_t orig_memory = p->phys_offset;
2689 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2690 int need_subpage = 0;
2691
2692 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2693 need_subpage);
f6405247 2694 if (need_subpage) {
db7b5426
BS
2695 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2696 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2697 &p->phys_offset, orig_memory,
2698 p->region_offset);
db7b5426
BS
2699 } else {
2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2701 >> IO_MEM_SHIFT];
2702 }
8da3ff18
PB
2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2704 region_offset);
2705 p->region_offset = 0;
db7b5426
BS
2706 } else {
2707 p->phys_offset = phys_offset;
2708 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2709 (phys_offset & IO_MEM_ROMD))
2710 phys_offset += TARGET_PAGE_SIZE;
2711 }
2712 } else {
2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2714 p->phys_offset = phys_offset;
8da3ff18 2715 p->region_offset = region_offset;
db7b5426 2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2717 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2718 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2719 } else {
c227f099 2720 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2721 int need_subpage = 0;
2722
2723 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2724 end_addr2, need_subpage);
2725
f6405247 2726 if (need_subpage) {
db7b5426 2727 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2728 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2729 addr & TARGET_PAGE_MASK);
db7b5426 2730 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2731 phys_offset, region_offset);
2732 p->region_offset = 0;
db7b5426
BS
2733 }
2734 }
2735 }
8da3ff18 2736 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2737 addr += TARGET_PAGE_SIZE;
2738 } while (addr != end_addr);
3b46e624 2739
9d42037b
FB
2740 /* since each CPU stores ram addresses in its TLB cache, we must
2741 reset the modified entries */
2742 /* XXX: slow ! */
2743 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2744 tlb_flush(env, 1);
2745 }
33417e70
FB
2746}
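/*
 * Illustrative sketch (not part of the original file): mapping an existing
 * RAM block into guest-physical space.  The base address 0xd0000000 and the
 * name "example.ram" are made up; region_offset 0 and log_dirty false are
 * the common case.
 */
static void example_map_ram(void)
{
    ram_addr_t off = qemu_ram_alloc(NULL, "example.ram", 0x100000);

    /* RAM offsets are page aligned, so the low bits stay IO_MEM_RAM (0). */
    cpu_register_physical_memory_log(0xd0000000, 0x100000, off | IO_MEM_RAM,
                                     0, false);
}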
2747
ba863458 2748/* XXX: temporary until new memory mapping API */
c227f099 2749ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2750{
2751 PhysPageDesc *p;
2752
2753 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754 if (!p)
2755 return IO_MEM_UNASSIGNED;
2756 return p->phys_offset;
2757}
2758
c227f099 2759void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2760{
2761 if (kvm_enabled())
2762 kvm_coalesce_mmio_region(addr, size);
2763}
2764
c227f099 2765void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2766{
2767 if (kvm_enabled())
2768 kvm_uncoalesce_mmio_region(addr, size);
2769}
2770
62a2744c
SY
2771void qemu_flush_coalesced_mmio_buffer(void)
2772{
2773 if (kvm_enabled())
2774 kvm_flush_coalesced_mmio_buffer();
2775}
2776
c902760f
MT
2777#if defined(__linux__) && !defined(TARGET_S390X)
2778
2779#include <sys/vfs.h>
2780
2781#define HUGETLBFS_MAGIC 0x958458f6
2782
2783static long gethugepagesize(const char *path)
2784{
2785 struct statfs fs;
2786 int ret;
2787
2788 do {
9742bf26 2789 ret = statfs(path, &fs);
c902760f
MT
2790 } while (ret != 0 && errno == EINTR);
2791
2792 if (ret != 0) {
9742bf26
YT
2793 perror(path);
2794 return 0;
c902760f
MT
2795 }
2796
2797 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2798 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2799
2800 return fs.f_bsize;
2801}
2802
04b16653
AW
2803static void *file_ram_alloc(RAMBlock *block,
2804 ram_addr_t memory,
2805 const char *path)
c902760f
MT
2806{
2807 char *filename;
2808 void *area;
2809 int fd;
2810#ifdef MAP_POPULATE
2811 int flags;
2812#endif
2813 unsigned long hpagesize;
2814
2815 hpagesize = gethugepagesize(path);
2816 if (!hpagesize) {
9742bf26 2817 return NULL;
c902760f
MT
2818 }
2819
2820 if (memory < hpagesize) {
2821 return NULL;
2822 }
2823
2824 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2825 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2826 return NULL;
2827 }
2828
2829 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2830 return NULL;
c902760f
MT
2831 }
2832
2833 fd = mkstemp(filename);
2834 if (fd < 0) {
9742bf26
YT
2835 perror("unable to create backing store for hugepages");
2836 free(filename);
2837 return NULL;
c902760f
MT
2838 }
2839 unlink(filename);
2840 free(filename);
2841
2842 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2843
2844 /*
2845 * ftruncate is not supported by hugetlbfs in older
2846 * hosts, so don't bother bailing out on errors.
2847 * If anything goes wrong with it under other filesystems,
2848 * mmap will fail.
2849 */
2850 if (ftruncate(fd, memory))
9742bf26 2851 perror("ftruncate");
c902760f
MT
2852
2853#ifdef MAP_POPULATE
2854 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2855 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2856 * to sidestep this quirk.
2857 */
2858 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2859 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2860#else
2861 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2862#endif
2863 if (area == MAP_FAILED) {
9742bf26
YT
2864 perror("file_ram_alloc: can't mmap RAM pages");
2865 close(fd);
2866 return (NULL);
c902760f 2867 }
04b16653 2868 block->fd = fd;
c902760f
MT
2869 return area;
2870}
2871#endif
2872
d17b5288 2873static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2874{
2875 RAMBlock *block, *next_block;
3e837b2c 2876 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2877
2878 if (QLIST_EMPTY(&ram_list.blocks))
2879 return 0;
2880
2881 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2882 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2883
2884 end = block->offset + block->length;
2885
2886 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2887 if (next_block->offset >= end) {
2888 next = MIN(next, next_block->offset);
2889 }
2890 }
2891 if (next - end >= size && next - end < mingap) {
3e837b2c 2892 offset = end;
04b16653
AW
2893 mingap = next - end;
2894 }
2895 }
3e837b2c
AW
2896
2897 if (offset == RAM_ADDR_MAX) {
2898 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2899 (uint64_t)size);
2900 abort();
2901 }
2902
04b16653
AW
2903 return offset;
2904}
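/*
 * Worked example (illustrative, not part of the original file): with blocks
 * at [0x0, 0x08000000) and [0x10000000, 0x18000000), a request for
 * 0x04000000 bytes returns offset 0x08000000 -- the end of the first block,
 * because the 128 MB gap there is the smallest one that still fits.
 */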
2905
2906static ram_addr_t last_ram_offset(void)
d17b5288
AW
2907{
2908 RAMBlock *block;
2909 ram_addr_t last = 0;
2910
2911 QLIST_FOREACH(block, &ram_list.blocks, next)
2912 last = MAX(last, block->offset + block->length);
2913
2914 return last;
2915}
2916
84b89d78 2917ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2918 ram_addr_t size, void *host)
84b89d78
CM
2919{
2920 RAMBlock *new_block, *block;
2921
2922 size = TARGET_PAGE_ALIGN(size);
7267c094 2923 new_block = g_malloc0(sizeof(*new_block));
84b89d78
CM
2924
2925 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2926 char *id = dev->parent_bus->info->get_dev_path(dev);
2927 if (id) {
2928 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2929 g_free(id);
84b89d78
CM
2930 }
2931 }
2932 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2933
2934 QLIST_FOREACH(block, &ram_list.blocks, next) {
2935 if (!strcmp(block->idstr, new_block->idstr)) {
2936 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2937 new_block->idstr);
2938 abort();
2939 }
2940 }
2941
432d268c 2942 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2943 if (host) {
2944 new_block->host = host;
cd19cfa2 2945 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2946 } else {
2947 if (mem_path) {
c902760f 2948#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2949 new_block->host = file_ram_alloc(new_block, size, mem_path);
2950 if (!new_block->host) {
2951 new_block->host = qemu_vmalloc(size);
e78815a5 2952 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2953 }
c902760f 2954#else
6977dfe6
YT
2955 fprintf(stderr, "-mem-path option unsupported\n");
2956 exit(1);
c902760f 2957#endif
6977dfe6 2958 } else {
6b02494d 2959#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2960 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2961 a system defined value, which is at least 256GB. Larger systems
2962 have larger values. We put the guest between the end of data
2963 segment (system break) and this value. We use 32GB as a base to
2964 have enough room for the system break to grow. */
2965 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2966 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2967 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2968 if (new_block->host == MAP_FAILED) {
2969 fprintf(stderr, "Allocating RAM failed\n");
2970 abort();
2971 }
6b02494d 2972#else
868bb33f 2973 if (xen_enabled()) {
432d268c
JN
2974 xen_ram_alloc(new_block->offset, size);
2975 } else {
2976 new_block->host = qemu_vmalloc(size);
2977 }
6b02494d 2978#endif
e78815a5 2979 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2980 }
c902760f 2981 }
94a6b54f
PB
2982 new_block->length = size;
2983
f471a17e 2984 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2985
7267c094 2986 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2987 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2988 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2989 0xff, size >> TARGET_PAGE_BITS);
2990
6f0437e8
JK
2991 if (kvm_enabled())
2992 kvm_setup_guest_memory(new_block->host, size);
2993
94a6b54f
PB
2994 return new_block->offset;
2995}
e9a1ab19 2996
6977dfe6
YT
2997ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2998{
2999 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
3000}
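/*
 * Illustrative sketch (not part of the original file): how a device model
 * typically pairs qemu_ram_alloc() with qemu_get_ram_ptr() to obtain a host
 * pointer to memory it owns (e.g. video RAM).  The name "example.vram" and
 * the 8 MB size are made up.
 */
static uint8_t *example_alloc_vram(ram_addr_t *offset)
{
    *offset = qemu_ram_alloc(NULL, "example.vram", 8 * 1024 * 1024);
    return qemu_get_ram_ptr(*offset);
}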
3001
1f2e98b6
AW
3002void qemu_ram_free_from_ptr(ram_addr_t addr)
3003{
3004 RAMBlock *block;
3005
3006 QLIST_FOREACH(block, &ram_list.blocks, next) {
3007 if (addr == block->offset) {
3008 QLIST_REMOVE(block, next);
7267c094 3009 g_free(block);
1f2e98b6
AW
3010 return;
3011 }
3012 }
3013}
3014
c227f099 3015void qemu_ram_free(ram_addr_t addr)
e9a1ab19 3016{
04b16653
AW
3017 RAMBlock *block;
3018
3019 QLIST_FOREACH(block, &ram_list.blocks, next) {
3020 if (addr == block->offset) {
3021 QLIST_REMOVE(block, next);
cd19cfa2
HY
3022 if (block->flags & RAM_PREALLOC_MASK) {
3023 ;
3024 } else if (mem_path) {
04b16653
AW
3025#if defined (__linux__) && !defined(TARGET_S390X)
3026 if (block->fd) {
3027 munmap(block->host, block->length);
3028 close(block->fd);
3029 } else {
3030 qemu_vfree(block->host);
3031 }
fd28aa13
JK
3032#else
3033 abort();
04b16653
AW
3034#endif
3035 } else {
3036#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3037 munmap(block->host, block->length);
3038#else
868bb33f 3039 if (xen_enabled()) {
e41d7c69 3040 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3041 } else {
3042 qemu_vfree(block->host);
3043 }
04b16653
AW
3044#endif
3045 }
7267c094 3046 g_free(block);
04b16653
AW
3047 return;
3048 }
3049 }
3050
e9a1ab19
FB
3051}
3052
cd19cfa2
HY
3053#ifndef _WIN32
3054void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3055{
3056 RAMBlock *block;
3057 ram_addr_t offset;
3058 int flags;
3059 void *area, *vaddr;
3060
3061 QLIST_FOREACH(block, &ram_list.blocks, next) {
3062 offset = addr - block->offset;
3063 if (offset < block->length) {
3064 vaddr = block->host + offset;
3065 if (block->flags & RAM_PREALLOC_MASK) {
3066 ;
3067 } else {
3068 flags = MAP_FIXED;
3069 munmap(vaddr, length);
3070 if (mem_path) {
3071#if defined(__linux__) && !defined(TARGET_S390X)
3072 if (block->fd) {
3073#ifdef MAP_POPULATE
3074 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3075 MAP_PRIVATE;
3076#else
3077 flags |= MAP_PRIVATE;
3078#endif
3079 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3080 flags, block->fd, offset);
3081 } else {
3082 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3083 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3084 flags, -1, 0);
3085 }
fd28aa13
JK
3086#else
3087 abort();
cd19cfa2
HY
3088#endif
3089 } else {
3090#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3091 flags |= MAP_SHARED | MAP_ANONYMOUS;
3092 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3093 flags, -1, 0);
3094#else
3095 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3096 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3097 flags, -1, 0);
3098#endif
3099 }
3100 if (area != vaddr) {
f15fbc4b
AP
3101 fprintf(stderr, "Could not remap addr: "
3102 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3103 length, addr);
3104 exit(1);
3105 }
3106 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3107 }
3108 return;
3109 }
3110 }
3111}
3112#endif /* !_WIN32 */
3113
dc828ca1 3114/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3115 With the exception of the softmmu code in this file, this should
3116 only be used for local memory (e.g. video ram) that the device owns,
3117 and knows it isn't going to access beyond the end of the block.
3118
3119 It should not be used for general purpose DMA.
3120 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3121 */
c227f099 3122void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3123{
94a6b54f
PB
3124 RAMBlock *block;
3125
f471a17e
AW
3126 QLIST_FOREACH(block, &ram_list.blocks, next) {
3127 if (addr - block->offset < block->length) {
7d82af38
VP
 3128 /* Move this entry to the start of the list. */
3129 if (block != QLIST_FIRST(&ram_list.blocks)) {
3130 QLIST_REMOVE(block, next);
3131 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3132 }
868bb33f 3133 if (xen_enabled()) {
432d268c
JN
3134 /* We need to check if the requested address is in the RAM
3135 * because we don't want to map the entire memory in QEMU.
712c2b41 3136 * In that case just map until the end of the page.
432d268c
JN
3137 */
3138 if (block->offset == 0) {
e41d7c69 3139 return xen_map_cache(addr, 0, 0);
432d268c 3140 } else if (block->host == NULL) {
e41d7c69
JK
3141 block->host =
3142 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3143 }
3144 }
f471a17e
AW
3145 return block->host + (addr - block->offset);
3146 }
94a6b54f 3147 }
f471a17e
AW
3148
3149 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3150 abort();
3151
3152 return NULL;
dc828ca1
PB
3153}
3154
b2e0a138
MT
3155/* Return a host pointer to ram allocated with qemu_ram_alloc.
 3156 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
3157 */
3158void *qemu_safe_ram_ptr(ram_addr_t addr)
3159{
3160 RAMBlock *block;
3161
3162 QLIST_FOREACH(block, &ram_list.blocks, next) {
3163 if (addr - block->offset < block->length) {
868bb33f 3164 if (xen_enabled()) {
432d268c
JN
3165 /* We need to check if the requested address is in the RAM
3166 * because we don't want to map the entire memory in QEMU.
712c2b41 3167 * In that case just map until the end of the page.
432d268c
JN
3168 */
3169 if (block->offset == 0) {
e41d7c69 3170 return xen_map_cache(addr, 0, 0);
432d268c 3171 } else if (block->host == NULL) {
e41d7c69
JK
3172 block->host =
3173 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3174 }
3175 }
b2e0a138
MT
3176 return block->host + (addr - block->offset);
3177 }
3178 }
3179
3180 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3181 abort();
3182
3183 return NULL;
3184}
3185
38bee5dc
SS
 3186/* Return a host pointer to the guest's ram. Similar to qemu_get_ram_ptr
3187 * but takes a size argument */
8ab934f9 3188void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3189{
8ab934f9
SS
3190 if (*size == 0) {
3191 return NULL;
3192 }
868bb33f 3193 if (xen_enabled()) {
e41d7c69 3194 return xen_map_cache(addr, *size, 1);
868bb33f 3195 } else {
38bee5dc
SS
3196 RAMBlock *block;
3197
3198 QLIST_FOREACH(block, &ram_list.blocks, next) {
3199 if (addr - block->offset < block->length) {
3200 if (addr - block->offset + *size > block->length)
3201 *size = block->length - addr + block->offset;
3202 return block->host + (addr - block->offset);
3203 }
3204 }
3205
3206 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3207 abort();
38bee5dc
SS
3208 }
3209}
3210
050a0ddf
AP
3211void qemu_put_ram_ptr(void *addr)
3212{
3213 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3214}
3215
e890261f 3216int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3217{
94a6b54f
PB
3218 RAMBlock *block;
3219 uint8_t *host = ptr;
3220
868bb33f 3221 if (xen_enabled()) {
e41d7c69 3222 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3223 return 0;
3224 }
3225
f471a17e 3226 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3227 /* This case happens when the block is not mapped. */
3228 if (block->host == NULL) {
3229 continue;
3230 }
f471a17e 3231 if (host - block->host < block->length) {
e890261f
MT
3232 *ram_addr = block->offset + (host - block->host);
3233 return 0;
f471a17e 3234 }
94a6b54f 3235 }
432d268c 3236
e890261f
MT
3237 return -1;
3238}
f471a17e 3239
e890261f
MT
3240/* Some of the softmmu routines need to translate from a host pointer
3241 (typically a TLB entry) back to a ram offset. */
3242ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3243{
3244 ram_addr_t ram_addr;
f471a17e 3245
e890261f
MT
3246 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3247 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3248 abort();
3249 }
3250 return ram_addr;
5579c7f3
PB
3251}
3252
c227f099 3253static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3254{
67d3b957 3255#ifdef DEBUG_UNASSIGNED
ab3d1727 3256 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3257#endif
5b450407 3258#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3259 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3260#endif
3261 return 0;
3262}
3263
c227f099 3264static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3265{
3266#ifdef DEBUG_UNASSIGNED
3267 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3268#endif
5b450407 3269#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3270 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3271#endif
3272 return 0;
3273}
3274
c227f099 3275static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3276{
3277#ifdef DEBUG_UNASSIGNED
3278 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3279#endif
5b450407 3280#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3281 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3282#endif
33417e70
FB
3283 return 0;
3284}
3285
c227f099 3286static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3287{
67d3b957 3288#ifdef DEBUG_UNASSIGNED
ab3d1727 3289 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3290#endif
5b450407 3291#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3292 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3293#endif
3294}
3295
c227f099 3296static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3297{
3298#ifdef DEBUG_UNASSIGNED
3299 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3300#endif
5b450407 3301#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3302 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3303#endif
3304}
3305
c227f099 3306static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3307{
3308#ifdef DEBUG_UNASSIGNED
3309 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3310#endif
5b450407 3311#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3312 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3313#endif
33417e70
FB
3314}
3315
d60efc6b 3316static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3317 unassigned_mem_readb,
e18231a3
BS
3318 unassigned_mem_readw,
3319 unassigned_mem_readl,
33417e70
FB
3320};
3321
d60efc6b 3322static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3323 unassigned_mem_writeb,
e18231a3
BS
3324 unassigned_mem_writew,
3325 unassigned_mem_writel,
33417e70
FB
3326};
3327
c227f099 3328static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3329 uint32_t val)
9fa3e853 3330{
3a7d929e 3331 int dirty_flags;
f7c11b53 3332 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3333 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3334#if !defined(CONFIG_USER_ONLY)
3a7d929e 3335 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3336 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3337#endif
3a7d929e 3338 }
5579c7f3 3339 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3340 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3341 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3342 /* we remove the notdirty callback only if the code has been
3343 flushed */
3344 if (dirty_flags == 0xff)
2e70f6ef 3345 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3346}
3347
c227f099 3348static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3349 uint32_t val)
9fa3e853 3350{
3a7d929e 3351 int dirty_flags;
f7c11b53 3352 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3353 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3354#if !defined(CONFIG_USER_ONLY)
3a7d929e 3355 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3356 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3357#endif
3a7d929e 3358 }
5579c7f3 3359 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3360 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3361 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3362 /* we remove the notdirty callback only if the code has been
3363 flushed */
3364 if (dirty_flags == 0xff)
2e70f6ef 3365 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3366}
3367
c227f099 3368static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3369 uint32_t val)
9fa3e853 3370{
3a7d929e 3371 int dirty_flags;
f7c11b53 3372 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3373 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3374#if !defined(CONFIG_USER_ONLY)
3a7d929e 3375 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3376 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3377#endif
3a7d929e 3378 }
5579c7f3 3379 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3380 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3381 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3382 /* we remove the notdirty callback only if the code has been
3383 flushed */
3384 if (dirty_flags == 0xff)
2e70f6ef 3385 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3386}
3387
d60efc6b 3388static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3389 NULL, /* never used */
3390 NULL, /* never used */
3391 NULL, /* never used */
3392};
3393
d60efc6b 3394static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3395 notdirty_mem_writeb,
3396 notdirty_mem_writew,
3397 notdirty_mem_writel,
3398};
3399
0f459d16 3400/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3401static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3402{
3403 CPUState *env = cpu_single_env;
06d55cc1
AL
3404 target_ulong pc, cs_base;
3405 TranslationBlock *tb;
0f459d16 3406 target_ulong vaddr;
a1d1bb31 3407 CPUWatchpoint *wp;
06d55cc1 3408 int cpu_flags;
0f459d16 3409
06d55cc1
AL
3410 if (env->watchpoint_hit) {
3411 /* We re-entered the check after replacing the TB. Now raise
 3412 * the debug interrupt so that it will trigger after the
3413 * current instruction. */
3414 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3415 return;
3416 }
2e70f6ef 3417 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3418 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3419 if ((vaddr == (wp->vaddr & len_mask) ||
3420 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3421 wp->flags |= BP_WATCHPOINT_HIT;
3422 if (!env->watchpoint_hit) {
3423 env->watchpoint_hit = wp;
3424 tb = tb_find_pc(env->mem_io_pc);
3425 if (!tb) {
3426 cpu_abort(env, "check_watchpoint: could not find TB for "
3427 "pc=%p", (void *)env->mem_io_pc);
3428 }
618ba8e6 3429 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3430 tb_phys_invalidate(tb, -1);
3431 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3432 env->exception_index = EXCP_DEBUG;
3433 } else {
3434 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3435 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3436 }
3437 cpu_resume_from_signal(env, NULL);
06d55cc1 3438 }
6e140f28
AL
3439 } else {
3440 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3441 }
3442 }
3443}
3444
6658ffb8
PB
3445/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3446 so these check for a hit then pass through to the normal out-of-line
3447 phys routines. */
c227f099 3448static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3449{
b4051334 3450 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3451 return ldub_phys(addr);
3452}
3453
c227f099 3454static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3455{
b4051334 3456 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3457 return lduw_phys(addr);
3458}
3459
c227f099 3460static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3461{
b4051334 3462 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3463 return ldl_phys(addr);
3464}
3465
c227f099 3466static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3467 uint32_t val)
3468{
b4051334 3469 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3470 stb_phys(addr, val);
3471}
3472
c227f099 3473static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3474 uint32_t val)
3475{
b4051334 3476 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3477 stw_phys(addr, val);
3478}
3479
c227f099 3480static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3481 uint32_t val)
3482{
b4051334 3483 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3484 stl_phys(addr, val);
3485}
3486
d60efc6b 3487static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3488 watch_mem_readb,
3489 watch_mem_readw,
3490 watch_mem_readl,
3491};
3492
d60efc6b 3493static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3494 watch_mem_writeb,
3495 watch_mem_writew,
3496 watch_mem_writel,
3497};
6658ffb8 3498
f6405247
RH
3499static inline uint32_t subpage_readlen (subpage_t *mmio,
3500 target_phys_addr_t addr,
3501 unsigned int len)
db7b5426 3502{
f6405247 3503 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3504#if defined(DEBUG_SUBPAGE)
3505 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3506 mmio, len, addr, idx);
3507#endif
db7b5426 3508
f6405247
RH
3509 addr += mmio->region_offset[idx];
3510 idx = mmio->sub_io_index[idx];
3511 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3512}
3513
c227f099 3514static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3515 uint32_t value, unsigned int len)
db7b5426 3516{
f6405247 3517 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3518#if defined(DEBUG_SUBPAGE)
f6405247
RH
3519 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3520 __func__, mmio, len, addr, idx, value);
db7b5426 3521#endif
f6405247
RH
3522
3523 addr += mmio->region_offset[idx];
3524 idx = mmio->sub_io_index[idx];
3525 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3526}
3527
c227f099 3528static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3529{
db7b5426
BS
3530 return subpage_readlen(opaque, addr, 0);
3531}
3532
c227f099 3533static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3534 uint32_t value)
3535{
db7b5426
BS
3536 subpage_writelen(opaque, addr, value, 0);
3537}
3538
c227f099 3539static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3540{
db7b5426
BS
3541 return subpage_readlen(opaque, addr, 1);
3542}
3543
c227f099 3544static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3545 uint32_t value)
3546{
db7b5426
BS
3547 subpage_writelen(opaque, addr, value, 1);
3548}
3549
c227f099 3550static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3551{
db7b5426
BS
3552 return subpage_readlen(opaque, addr, 2);
3553}
3554
f6405247
RH
3555static void subpage_writel (void *opaque, target_phys_addr_t addr,
3556 uint32_t value)
db7b5426 3557{
db7b5426
BS
3558 subpage_writelen(opaque, addr, value, 2);
3559}
3560
d60efc6b 3561static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3562 &subpage_readb,
3563 &subpage_readw,
3564 &subpage_readl,
3565};
3566
d60efc6b 3567static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3568 &subpage_writeb,
3569 &subpage_writew,
3570 &subpage_writel,
3571};
3572
c227f099
AL
3573static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3574 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3575{
3576 int idx, eidx;
3577
3578 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3579 return -1;
3580 idx = SUBPAGE_IDX(start);
3581 eidx = SUBPAGE_IDX(end);
3582#if defined(DEBUG_SUBPAGE)
0bf9e31a 3583 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3584 mmio, start, end, idx, eidx, memory);
3585#endif
95c318f5
GN
3586 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3587 memory = IO_MEM_UNASSIGNED;
f6405247 3588 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3589 for (; idx <= eidx; idx++) {
f6405247
RH
3590 mmio->sub_io_index[idx] = memory;
3591 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3592 }
3593
3594 return 0;
3595}
3596
f6405247
RH
3597static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3598 ram_addr_t orig_memory,
3599 ram_addr_t region_offset)
db7b5426 3600{
c227f099 3601 subpage_t *mmio;
db7b5426
BS
3602 int subpage_memory;
3603
7267c094 3604 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3605
3606 mmio->base = base;
2507c12a
AG
3607 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3608 DEVICE_NATIVE_ENDIAN);
db7b5426 3609#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3610 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3611 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3612#endif
1eec614b 3613 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3614 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3615
3616 return mmio;
3617}
3618
88715657
AL
3619static int get_free_io_mem_idx(void)
3620{
3621 int i;
3622
3623 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3624 if (!io_mem_used[i]) {
3625 io_mem_used[i] = 1;
3626 return i;
3627 }
c6703b47 3628 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3629 return -1;
3630}
3631
dd310534
AG
3632/*
3633 * Usually, devices operate in little endian mode. There are devices out
3634 * there that operate in big endian too. Each device gets byte swapped
3635 * mmio if plugged onto a CPU that does the other endianness.
3636 *
3637 * CPU Device swap?
3638 *
3639 * little little no
3640 * little big yes
3641 * big little yes
3642 * big big no
3643 */
3644
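As a rough illustration of the table above (not part of exec.c): the swap decision depends only on whether the CPU and the device disagree on endianness. The enum and helper names below are made up for illustration, and __builtin_bswap32() stands in for QEMU's own byte-swap helpers.

#include <stdbool.h>
#include <stdint.h>

enum mmio_endian { MMIO_LITTLE, MMIO_BIG };      /* illustrative only */

/* swap iff CPU and device endianness differ (rows 2 and 3 of the table) */
static bool mmio_needs_bswap(enum mmio_endian cpu, enum mmio_endian dev)
{
    return cpu != dev;
}

static uint32_t mmio_fixup32(enum mmio_endian cpu, enum mmio_endian dev,
                             uint32_t val)
{
    return mmio_needs_bswap(cpu, dev) ? __builtin_bswap32(val) : val;
}

This is the rule the SwapEndianContainer wrappers below implement for the registered read/write callbacks.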
3645typedef struct SwapEndianContainer {
3646 CPUReadMemoryFunc *read[3];
3647 CPUWriteMemoryFunc *write[3];
3648 void *opaque;
3649} SwapEndianContainer;
3650
3651static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3652{
3653 uint32_t val;
3654 SwapEndianContainer *c = opaque;
3655 val = c->read[0](c->opaque, addr);
3656 return val;
3657}
3658
3659static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3660{
3661 uint32_t val;
3662 SwapEndianContainer *c = opaque;
3663 val = bswap16(c->read[1](c->opaque, addr));
3664 return val;
3665}
3666
3667static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3668{
3669 uint32_t val;
3670 SwapEndianContainer *c = opaque;
3671 val = bswap32(c->read[2](c->opaque, addr));
3672 return val;
3673}
3674
3675static CPUReadMemoryFunc * const swapendian_readfn[3]={
3676 swapendian_mem_readb,
3677 swapendian_mem_readw,
3678 swapendian_mem_readl
3679};
3680
3681static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3682 uint32_t val)
3683{
3684 SwapEndianContainer *c = opaque;
3685 c->write[0](c->opaque, addr, val);
3686}
3687
3688static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3689 uint32_t val)
3690{
3691 SwapEndianContainer *c = opaque;
3692 c->write[1](c->opaque, addr, bswap16(val));
3693}
3694
3695static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3696 uint32_t val)
3697{
3698 SwapEndianContainer *c = opaque;
3699 c->write[2](c->opaque, addr, bswap32(val));
3700}
3701
3702static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3703 swapendian_mem_writeb,
3704 swapendian_mem_writew,
3705 swapendian_mem_writel
3706};
3707
3708static void swapendian_init(int io_index)
3709{
7267c094 3710 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
dd310534
AG
3711 int i;
3712
3713 /* Swap mmio for big endian targets */
3714 c->opaque = io_mem_opaque[io_index];
3715 for (i = 0; i < 3; i++) {
3716 c->read[i] = io_mem_read[io_index][i];
3717 c->write[i] = io_mem_write[io_index][i];
3718
3719 io_mem_read[io_index][i] = swapendian_readfn[i];
3720 io_mem_write[io_index][i] = swapendian_writefn[i];
3721 }
3722 io_mem_opaque[io_index] = c;
3723}
3724
3725static void swapendian_del(int io_index)
3726{
3727 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
7267c094 3728 g_free(io_mem_opaque[io_index]);
dd310534
AG
3729 }
3730}
3731
33417e70
FB
3732/* mem_read and mem_write are arrays of functions containing the
3733 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3734 2). Functions can be omitted with a NULL function pointer.
3ee89922 3735 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3736 modified. If it is zero, a new io zone is allocated. The return
3737 value can be used with cpu_register_physical_memory(). (-1) is
3738 returned on error. */
1eed09cb 3739static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3740 CPUReadMemoryFunc * const *mem_read,
3741 CPUWriteMemoryFunc * const *mem_write,
dd310534 3742 void *opaque, enum device_endian endian)
33417e70 3743{
3cab721d
RH
3744 int i;
3745
33417e70 3746 if (io_index <= 0) {
88715657
AL
3747 io_index = get_free_io_mem_idx();
3748 if (io_index == -1)
3749 return io_index;
33417e70 3750 } else {
1eed09cb 3751 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3752 if (io_index >= IO_MEM_NB_ENTRIES)
3753 return -1;
3754 }
b5ff1b31 3755
3cab721d
RH
3756 for (i = 0; i < 3; ++i) {
3757 io_mem_read[io_index][i]
3758 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3759 }
3760 for (i = 0; i < 3; ++i) {
3761 io_mem_write[io_index][i]
3762 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3763 }
a4193c8a 3764 io_mem_opaque[io_index] = opaque;
f6405247 3765
dd310534
AG
3766 switch (endian) {
3767 case DEVICE_BIG_ENDIAN:
3768#ifndef TARGET_WORDS_BIGENDIAN
3769 swapendian_init(io_index);
3770#endif
3771 break;
3772 case DEVICE_LITTLE_ENDIAN:
3773#ifdef TARGET_WORDS_BIGENDIAN
3774 swapendian_init(io_index);
3775#endif
3776 break;
3777 case DEVICE_NATIVE_ENDIAN:
3778 default:
3779 break;
3780 }
3781
f6405247 3782 return (io_index << IO_MEM_SHIFT);
33417e70 3783}
61382a50 3784
d60efc6b
BS
3785int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3786 CPUWriteMemoryFunc * const *mem_write,
dd310534 3787 void *opaque, enum device_endian endian)
1eed09cb 3788{
2507c12a 3789 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3790}
3791
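To make the registration comment above concrete, here is a hedged usage sketch for the public cpu_register_io_memory() wrapper; the mydev_* names are hypothetical. Only the 32-bit handlers are filled in, so byte and word accesses fall back to the unassigned handlers as described above, and the returned token would then be passed to cpu_register_physical_memory().

/* hypothetical device MMIO handlers (32-bit accesses only) */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    /* opaque is whatever was handed in at registration time */
    return 0;
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* react to the guest write here */
}

static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL,           /* byte reads fall back to unassigned_mem_read */
    NULL,           /* word reads fall back to unassigned_mem_read */
    mydev_readl,
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL,
    NULL,
    mydev_writel,
};

static int mydev_register_mmio(void *dev_state)
{
    /* returns (io_index << IO_MEM_SHIFT), or -1 if the io table is full */
    return cpu_register_io_memory(mydev_read, mydev_write, dev_state,
                                  DEVICE_NATIVE_ENDIAN);
}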
88715657
AL
3792void cpu_unregister_io_memory(int io_table_address)
3793{
3794 int i;
3795 int io_index = io_table_address >> IO_MEM_SHIFT;
3796
dd310534
AG
3797 swapendian_del(io_index);
3798
88715657
AL
3799 for (i=0;i < 3; i++) {
3800 io_mem_read[io_index][i] = unassigned_mem_read[i];
3801 io_mem_write[io_index][i] = unassigned_mem_write[i];
3802 }
3803 io_mem_opaque[io_index] = NULL;
3804 io_mem_used[io_index] = 0;
3805}
3806
e9179ce1
AK
3807static void io_mem_init(void)
3808{
3809 int i;
3810
2507c12a
AG
3811 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3812 unassigned_mem_write, NULL,
3813 DEVICE_NATIVE_ENDIAN);
3814 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3815 unassigned_mem_write, NULL,
3816 DEVICE_NATIVE_ENDIAN);
3817 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3818 notdirty_mem_write, NULL,
3819 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3820 for (i=0; i<5; i++)
3821 io_mem_used[i] = 1;
3822
3823 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3824 watch_mem_write, NULL,
3825 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3826}
3827
62152b8a
AK
3828static void memory_map_init(void)
3829{
7267c094 3830 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3831 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3832 set_system_memory_map(system_memory);
309cb471 3833
7267c094 3834 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3835 memory_region_init(system_io, "io", 65536);
3836 set_system_io_map(system_io);
62152b8a
AK
3837}
3838
3839MemoryRegion *get_system_memory(void)
3840{
3841 return system_memory;
3842}
3843
309cb471
AK
3844MemoryRegion *get_system_io(void)
3845{
3846 return system_io;
3847}
3848
e2eef170
PB
3849#endif /* !defined(CONFIG_USER_ONLY) */
3850
13eb76e0
FB
3851/* physical memory access (slow version, mainly for debug) */
3852#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3853int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3854 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3855{
3856 int l, flags;
3857 target_ulong page;
53a5960a 3858 void * p;
13eb76e0
FB
3859
3860 while (len > 0) {
3861 page = addr & TARGET_PAGE_MASK;
3862 l = (page + TARGET_PAGE_SIZE) - addr;
3863 if (l > len)
3864 l = len;
3865 flags = page_get_flags(page);
3866 if (!(flags & PAGE_VALID))
a68fe89c 3867 return -1;
13eb76e0
FB
3868 if (is_write) {
3869 if (!(flags & PAGE_WRITE))
a68fe89c 3870 return -1;
579a97f7 3871 /* XXX: this code should not depend on lock_user */
72fb7daa 3872 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3873 return -1;
72fb7daa
AJ
3874 memcpy(p, buf, l);
3875 unlock_user(p, addr, l);
13eb76e0
FB
3876 } else {
3877 if (!(flags & PAGE_READ))
a68fe89c 3878 return -1;
579a97f7 3879 /* XXX: this code should not depend on lock_user */
72fb7daa 3880 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3881 return -1;
72fb7daa 3882 memcpy(buf, p, l);
5b257578 3883 unlock_user(p, addr, 0);
13eb76e0
FB
3884 }
3885 len -= l;
3886 buf += l;
3887 addr += l;
3888 }
a68fe89c 3889 return 0;
13eb76e0 3890}
8df1cd07 3891
13eb76e0 3892#else
c227f099 3893void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3894 int len, int is_write)
3895{
3896 int l, io_index;
3897 uint8_t *ptr;
3898 uint32_t val;
c227f099 3899 target_phys_addr_t page;
8ca5692d 3900 ram_addr_t pd;
92e873b9 3901 PhysPageDesc *p;
3b46e624 3902
13eb76e0
FB
3903 while (len > 0) {
3904 page = addr & TARGET_PAGE_MASK;
3905 l = (page + TARGET_PAGE_SIZE) - addr;
3906 if (l > len)
3907 l = len;
92e873b9 3908 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3909 if (!p) {
3910 pd = IO_MEM_UNASSIGNED;
3911 } else {
3912 pd = p->phys_offset;
3913 }
3b46e624 3914
13eb76e0 3915 if (is_write) {
3a7d929e 3916 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3917 target_phys_addr_t addr1 = addr;
13eb76e0 3918 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3919 if (p)
6c2934db 3920 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3921 /* XXX: could force cpu_single_env to NULL to avoid
3922 potential bugs */
6c2934db 3923 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3924 /* 32 bit write access */
c27004ec 3925 val = ldl_p(buf);
6c2934db 3926 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3927 l = 4;
6c2934db 3928 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3929 /* 16 bit write access */
c27004ec 3930 val = lduw_p(buf);
6c2934db 3931 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3932 l = 2;
3933 } else {
1c213d19 3934 /* 8 bit write access */
c27004ec 3935 val = ldub_p(buf);
6c2934db 3936 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3937 l = 1;
3938 }
3939 } else {
8ca5692d 3940 ram_addr_t addr1;
b448f2f3 3941 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3942 /* RAM case */
5579c7f3 3943 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3944 memcpy(ptr, buf, l);
3a7d929e
FB
3945 if (!cpu_physical_memory_is_dirty(addr1)) {
3946 /* invalidate code */
3947 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3948 /* set dirty bit */
f7c11b53
YT
3949 cpu_physical_memory_set_dirty_flags(
3950 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3951 }
050a0ddf 3952 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3953 }
3954 } else {
5fafdf24 3955 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3956 !(pd & IO_MEM_ROMD)) {
c227f099 3957 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3958 /* I/O case */
3959 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3960 if (p)
6c2934db
AJ
3961 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3962 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3963 /* 32 bit read access */
6c2934db 3964 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3965 stl_p(buf, val);
13eb76e0 3966 l = 4;
6c2934db 3967 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3968 /* 16 bit read access */
6c2934db 3969 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3970 stw_p(buf, val);
13eb76e0
FB
3971 l = 2;
3972 } else {
1c213d19 3973 /* 8 bit read access */
6c2934db 3974 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3975 stb_p(buf, val);
13eb76e0
FB
3976 l = 1;
3977 }
3978 } else {
3979 /* RAM case */
050a0ddf
AP
3980 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3981 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3982 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3983 }
3984 }
3985 len -= l;
3986 buf += l;
3987 addr += l;
3988 }
3989}
8df1cd07 3990
d0ecd2aa 3991/* used for ROM loading: can write in RAM and ROM */
c227f099 3992void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3993 const uint8_t *buf, int len)
3994{
3995 int l;
3996 uint8_t *ptr;
c227f099 3997 target_phys_addr_t page;
d0ecd2aa
FB
3998 unsigned long pd;
3999 PhysPageDesc *p;
3b46e624 4000
d0ecd2aa
FB
4001 while (len > 0) {
4002 page = addr & TARGET_PAGE_MASK;
4003 l = (page + TARGET_PAGE_SIZE) - addr;
4004 if (l > len)
4005 l = len;
4006 p = phys_page_find(page >> TARGET_PAGE_BITS);
4007 if (!p) {
4008 pd = IO_MEM_UNASSIGNED;
4009 } else {
4010 pd = p->phys_offset;
4011 }
3b46e624 4012
d0ecd2aa 4013 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
4014 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4015 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
4016 /* do nothing */
4017 } else {
4018 unsigned long addr1;
4019 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4020 /* ROM/RAM case */
5579c7f3 4021 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 4022 memcpy(ptr, buf, l);
050a0ddf 4023 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
4024 }
4025 len -= l;
4026 buf += l;
4027 addr += l;
4028 }
4029}
4030
6d16c2f8
AL
4031typedef struct {
4032 void *buffer;
c227f099
AL
4033 target_phys_addr_t addr;
4034 target_phys_addr_t len;
6d16c2f8
AL
4035} BounceBuffer;
4036
4037static BounceBuffer bounce;
4038
ba223c29
AL
4039typedef struct MapClient {
4040 void *opaque;
4041 void (*callback)(void *opaque);
72cf2d4f 4042 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4043} MapClient;
4044
72cf2d4f
BS
4045static QLIST_HEAD(map_client_list, MapClient) map_client_list
4046 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4047
4048void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4049{
7267c094 4050 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
4051
4052 client->opaque = opaque;
4053 client->callback = callback;
72cf2d4f 4054 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4055 return client;
4056}
4057
4058void cpu_unregister_map_client(void *_client)
4059{
4060 MapClient *client = (MapClient *)_client;
4061
72cf2d4f 4062 QLIST_REMOVE(client, link);
7267c094 4063 g_free(client);
ba223c29
AL
4064}
4065
4066static void cpu_notify_map_clients(void)
4067{
4068 MapClient *client;
4069
72cf2d4f
BS
4070 while (!QLIST_EMPTY(&map_client_list)) {
4071 client = QLIST_FIRST(&map_client_list);
ba223c29 4072 client->callback(client->opaque);
34d5e948 4073 cpu_unregister_map_client(client);
ba223c29
AL
4074 }
4075}
4076
6d16c2f8
AL
4077/* Map a physical memory region into a host virtual address.
4078 * May map a subset of the requested range, given by and returned in *plen.
4079 * May return NULL if resources needed to perform the mapping are exhausted.
4080 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4081 * Use cpu_register_map_client() to know when retrying the map operation is
4082 * likely to succeed.
6d16c2f8 4083 */
c227f099
AL
4084void *cpu_physical_memory_map(target_phys_addr_t addr,
4085 target_phys_addr_t *plen,
6d16c2f8
AL
4086 int is_write)
4087{
c227f099 4088 target_phys_addr_t len = *plen;
38bee5dc 4089 target_phys_addr_t todo = 0;
6d16c2f8 4090 int l;
c227f099 4091 target_phys_addr_t page;
6d16c2f8
AL
4092 unsigned long pd;
4093 PhysPageDesc *p;
f15fbc4b 4094 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4095 ram_addr_t rlen;
4096 void *ret;
6d16c2f8
AL
4097
4098 while (len > 0) {
4099 page = addr & TARGET_PAGE_MASK;
4100 l = (page + TARGET_PAGE_SIZE) - addr;
4101 if (l > len)
4102 l = len;
4103 p = phys_page_find(page >> TARGET_PAGE_BITS);
4104 if (!p) {
4105 pd = IO_MEM_UNASSIGNED;
4106 } else {
4107 pd = p->phys_offset;
4108 }
4109
4110 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4111 if (todo || bounce.buffer) {
6d16c2f8
AL
4112 break;
4113 }
4114 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4115 bounce.addr = addr;
4116 bounce.len = l;
4117 if (!is_write) {
54f7b4a3 4118 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4119 }
38bee5dc
SS
4120
4121 *plen = l;
4122 return bounce.buffer;
6d16c2f8 4123 }
8ab934f9
SS
4124 if (!todo) {
4125 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4126 }
6d16c2f8
AL
4127
4128 len -= l;
4129 addr += l;
38bee5dc 4130 todo += l;
6d16c2f8 4131 }
8ab934f9
SS
4132 rlen = todo;
4133 ret = qemu_ram_ptr_length(raddr, &rlen);
4134 *plen = rlen;
4135 return ret;
6d16c2f8
AL
4136}
4137
4138/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4139 * Will also mark the memory as dirty if is_write == 1. access_len gives
4140 * the amount of memory that was actually read or written by the caller.
4141 */
c227f099
AL
4142void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4143 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4144{
4145 if (buffer != bounce.buffer) {
4146 if (is_write) {
e890261f 4147 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4148 while (access_len) {
4149 unsigned l;
4150 l = TARGET_PAGE_SIZE;
4151 if (l > access_len)
4152 l = access_len;
4153 if (!cpu_physical_memory_is_dirty(addr1)) {
4154 /* invalidate code */
4155 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4156 /* set dirty bit */
f7c11b53
YT
4157 cpu_physical_memory_set_dirty_flags(
4158 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4159 }
4160 addr1 += l;
4161 access_len -= l;
4162 }
4163 }
868bb33f 4164 if (xen_enabled()) {
e41d7c69 4165 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4166 }
6d16c2f8
AL
4167 return;
4168 }
4169 if (is_write) {
4170 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4171 }
f8a83245 4172 qemu_vfree(bounce.buffer);
6d16c2f8 4173 bounce.buffer = NULL;
ba223c29 4174 cpu_notify_map_clients();
6d16c2f8 4175}
d0ecd2aa 4176
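As a usage sketch for cpu_physical_memory_map()/cpu_physical_memory_unmap() above: copy_from_guest() is a hypothetical helper, not part of exec.c. It maps as much of the range as it can, falls back to cpu_physical_memory_read() when mapping is not possible, and always unmaps with access_len equal to what was actually read. A real DMA user would additionally register a map client with cpu_register_map_client() to retry once the bounce buffer becomes free.

/* hypothetical helper: read `len` bytes of guest-physical memory into `dst` */
static void copy_from_guest(target_phys_addr_t addr, uint8_t *dst,
                            target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *p = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

        if (!p || plen == 0) {
            /* mapping resources exhausted: use the slow copy for the rest */
            cpu_physical_memory_read(addr, dst, len);
            return;
        }
        memcpy(dst, p, plen);
        /* access_len == plen: everything that was mapped was actually read */
        cpu_physical_memory_unmap(p, plen, 0 /* is_write */, plen);

        addr += plen;
        dst  += plen;
        len  -= plen;
    }
}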
8df1cd07 4177/* warning: addr must be aligned */
1e78bcc1
AG
4178static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4179 enum device_endian endian)
8df1cd07
FB
4180{
4181 int io_index;
4182 uint8_t *ptr;
4183 uint32_t val;
4184 unsigned long pd;
4185 PhysPageDesc *p;
4186
4187 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4188 if (!p) {
4189 pd = IO_MEM_UNASSIGNED;
4190 } else {
4191 pd = p->phys_offset;
4192 }
3b46e624 4193
5fafdf24 4194 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4195 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4196 /* I/O case */
4197 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4198 if (p)
4199 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4200 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4201#if defined(TARGET_WORDS_BIGENDIAN)
4202 if (endian == DEVICE_LITTLE_ENDIAN) {
4203 val = bswap32(val);
4204 }
4205#else
4206 if (endian == DEVICE_BIG_ENDIAN) {
4207 val = bswap32(val);
4208 }
4209#endif
8df1cd07
FB
4210 } else {
4211 /* RAM case */
5579c7f3 4212 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4213 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4214 switch (endian) {
4215 case DEVICE_LITTLE_ENDIAN:
4216 val = ldl_le_p(ptr);
4217 break;
4218 case DEVICE_BIG_ENDIAN:
4219 val = ldl_be_p(ptr);
4220 break;
4221 default:
4222 val = ldl_p(ptr);
4223 break;
4224 }
8df1cd07
FB
4225 }
4226 return val;
4227}
4228
1e78bcc1
AG
4229uint32_t ldl_phys(target_phys_addr_t addr)
4230{
4231 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4232}
4233
4234uint32_t ldl_le_phys(target_phys_addr_t addr)
4235{
4236 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4237}
4238
4239uint32_t ldl_be_phys(target_phys_addr_t addr)
4240{
4241 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4242}
4243
84b7b8e7 4244/* warning: addr must be aligned */
1e78bcc1
AG
4245static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4246 enum device_endian endian)
84b7b8e7
FB
4247{
4248 int io_index;
4249 uint8_t *ptr;
4250 uint64_t val;
4251 unsigned long pd;
4252 PhysPageDesc *p;
4253
4254 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4255 if (!p) {
4256 pd = IO_MEM_UNASSIGNED;
4257 } else {
4258 pd = p->phys_offset;
4259 }
3b46e624 4260
2a4188a3
FB
4261 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4262 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4263 /* I/O case */
4264 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4265 if (p)
4266 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4267
4268 /* XXX This is broken when device endian != cpu endian.
4269 Fix and add "endian" variable check */
84b7b8e7
FB
4270#ifdef TARGET_WORDS_BIGENDIAN
4271 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4272 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4273#else
4274 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4275 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4276#endif
4277 } else {
4278 /* RAM case */
5579c7f3 4279 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4280 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4281 switch (endian) {
4282 case DEVICE_LITTLE_ENDIAN:
4283 val = ldq_le_p(ptr);
4284 break;
4285 case DEVICE_BIG_ENDIAN:
4286 val = ldq_be_p(ptr);
4287 break;
4288 default:
4289 val = ldq_p(ptr);
4290 break;
4291 }
84b7b8e7
FB
4292 }
4293 return val;
4294}
4295
1e78bcc1
AG
4296uint64_t ldq_phys(target_phys_addr_t addr)
4297{
4298 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4299}
4300
4301uint64_t ldq_le_phys(target_phys_addr_t addr)
4302{
4303 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4304}
4305
4306uint64_t ldq_be_phys(target_phys_addr_t addr)
4307{
4308 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4309}
4310
aab33094 4311/* XXX: optimize */
c227f099 4312uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4313{
4314 uint8_t val;
4315 cpu_physical_memory_read(addr, &val, 1);
4316 return val;
4317}
4318
733f0b02 4319/* warning: addr must be aligned */
1e78bcc1
AG
4320static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4321 enum device_endian endian)
aab33094 4322{
733f0b02
MT
4323 int io_index;
4324 uint8_t *ptr;
4325 uint64_t val;
4326 unsigned long pd;
4327 PhysPageDesc *p;
4328
4329 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4330 if (!p) {
4331 pd = IO_MEM_UNASSIGNED;
4332 } else {
4333 pd = p->phys_offset;
4334 }
4335
4336 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4337 !(pd & IO_MEM_ROMD)) {
4338 /* I/O case */
4339 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4340 if (p)
4341 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4342 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4343#if defined(TARGET_WORDS_BIGENDIAN)
4344 if (endian == DEVICE_LITTLE_ENDIAN) {
4345 val = bswap16(val);
4346 }
4347#else
4348 if (endian == DEVICE_BIG_ENDIAN) {
4349 val = bswap16(val);
4350 }
4351#endif
733f0b02
MT
4352 } else {
4353 /* RAM case */
4354 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4355 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4356 switch (endian) {
4357 case DEVICE_LITTLE_ENDIAN:
4358 val = lduw_le_p(ptr);
4359 break;
4360 case DEVICE_BIG_ENDIAN:
4361 val = lduw_be_p(ptr);
4362 break;
4363 default:
4364 val = lduw_p(ptr);
4365 break;
4366 }
733f0b02
MT
4367 }
4368 return val;
aab33094
FB
4369}
4370
1e78bcc1
AG
4371uint32_t lduw_phys(target_phys_addr_t addr)
4372{
4373 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4374}
4375
4376uint32_t lduw_le_phys(target_phys_addr_t addr)
4377{
4378 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4379}
4380
4381uint32_t lduw_be_phys(target_phys_addr_t addr)
4382{
4383 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4384}
4385
8df1cd07
FB
4386/* warning: addr must be aligned. The ram page is not marked as dirty
4387 and the code inside is not invalidated. It is useful if the dirty
4388 bits are used to track modified PTEs */
c227f099 4389void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4390{
4391 int io_index;
4392 uint8_t *ptr;
4393 unsigned long pd;
4394 PhysPageDesc *p;
4395
4396 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4397 if (!p) {
4398 pd = IO_MEM_UNASSIGNED;
4399 } else {
4400 pd = p->phys_offset;
4401 }
3b46e624 4402
3a7d929e 4403 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4404 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4405 if (p)
4406 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4407 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4408 } else {
74576198 4409 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4410 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4411 stl_p(ptr, val);
74576198
AL
4412
4413 if (unlikely(in_migration)) {
4414 if (!cpu_physical_memory_is_dirty(addr1)) {
4415 /* invalidate code */
4416 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4417 /* set dirty bit */
f7c11b53
YT
4418 cpu_physical_memory_set_dirty_flags(
4419 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4420 }
4421 }
8df1cd07
FB
4422 }
4423}
4424
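A minimal sketch of the use case hinted at in the warning above, assuming a target MMU helper that rewrites a guest page-table entry (for example to set accessed/dirty bits); update_guest_pte() is hypothetical and not part of exec.c.

/* hypothetical MMU helper: rewrite a 32-bit PTE in guest RAM */
static void update_guest_pte(target_phys_addr_t pte_addr, uint32_t new_pte)
{
    /* plain stl_phys() would also mark the RAM page dirty and invalidate any
       translated code on it; the notdirty variant skips both, which is what
       you want when the dirty bits themselves track PTE modifications */
    stl_phys_notdirty(pte_addr, new_pte);
}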
c227f099 4425void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4426{
4427 int io_index;
4428 uint8_t *ptr;
4429 unsigned long pd;
4430 PhysPageDesc *p;
4431
4432 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4433 if (!p) {
4434 pd = IO_MEM_UNASSIGNED;
4435 } else {
4436 pd = p->phys_offset;
4437 }
3b46e624 4438
bc98a7ef
JM
4439 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4440 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4441 if (p)
4442 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4443#ifdef TARGET_WORDS_BIGENDIAN
4444 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4445 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4446#else
4447 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4448 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4449#endif
4450 } else {
5579c7f3 4451 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4452 (addr & ~TARGET_PAGE_MASK);
4453 stq_p(ptr, val);
4454 }
4455}
4456
8df1cd07 4457/* warning: addr must be aligned */
1e78bcc1
AG
4458static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4459 enum device_endian endian)
8df1cd07
FB
4460{
4461 int io_index;
4462 uint8_t *ptr;
4463 unsigned long pd;
4464 PhysPageDesc *p;
4465
4466 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4467 if (!p) {
4468 pd = IO_MEM_UNASSIGNED;
4469 } else {
4470 pd = p->phys_offset;
4471 }
3b46e624 4472
3a7d929e 4473 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4474 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4475 if (p)
4476 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4477#if defined(TARGET_WORDS_BIGENDIAN)
4478 if (endian == DEVICE_LITTLE_ENDIAN) {
4479 val = bswap32(val);
4480 }
4481#else
4482 if (endian == DEVICE_BIG_ENDIAN) {
4483 val = bswap32(val);
4484 }
4485#endif
8df1cd07
FB
4486 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4487 } else {
4488 unsigned long addr1;
4489 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4490 /* RAM case */
5579c7f3 4491 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4492 switch (endian) {
4493 case DEVICE_LITTLE_ENDIAN:
4494 stl_le_p(ptr, val);
4495 break;
4496 case DEVICE_BIG_ENDIAN:
4497 stl_be_p(ptr, val);
4498 break;
4499 default:
4500 stl_p(ptr, val);
4501 break;
4502 }
3a7d929e
FB
4503 if (!cpu_physical_memory_is_dirty(addr1)) {
4504 /* invalidate code */
4505 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4506 /* set dirty bit */
f7c11b53
YT
4507 cpu_physical_memory_set_dirty_flags(addr1,
4508 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4509 }
8df1cd07
FB
4510 }
4511}
4512
1e78bcc1
AG
4513void stl_phys(target_phys_addr_t addr, uint32_t val)
4514{
4515 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4516}
4517
4518void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4519{
4520 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4521}
4522
4523void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4524{
4525 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4526}
4527
aab33094 4528/* XXX: optimize */
c227f099 4529void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4530{
4531 uint8_t v = val;
4532 cpu_physical_memory_write(addr, &v, 1);
4533}
4534
733f0b02 4535/* warning: addr must be aligned */
1e78bcc1
AG
4536static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4537 enum device_endian endian)
aab33094 4538{
733f0b02
MT
4539 int io_index;
4540 uint8_t *ptr;
4541 unsigned long pd;
4542 PhysPageDesc *p;
4543
4544 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4545 if (!p) {
4546 pd = IO_MEM_UNASSIGNED;
4547 } else {
4548 pd = p->phys_offset;
4549 }
4550
4551 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4552 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4553 if (p)
4554 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4555#if defined(TARGET_WORDS_BIGENDIAN)
4556 if (endian == DEVICE_LITTLE_ENDIAN) {
4557 val = bswap16(val);
4558 }
4559#else
4560 if (endian == DEVICE_BIG_ENDIAN) {
4561 val = bswap16(val);
4562 }
4563#endif
733f0b02
MT
4564 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4565 } else {
4566 unsigned long addr1;
4567 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4568 /* RAM case */
4569 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4570 switch (endian) {
4571 case DEVICE_LITTLE_ENDIAN:
4572 stw_le_p(ptr, val);
4573 break;
4574 case DEVICE_BIG_ENDIAN:
4575 stw_be_p(ptr, val);
4576 break;
4577 default:
4578 stw_p(ptr, val);
4579 break;
4580 }
733f0b02
MT
4581 if (!cpu_physical_memory_is_dirty(addr1)) {
4582 /* invalidate code */
4583 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4584 /* set dirty bit */
4585 cpu_physical_memory_set_dirty_flags(addr1,
4586 (0xff & ~CODE_DIRTY_FLAG));
4587 }
4588 }
aab33094
FB
4589}
4590
1e78bcc1
AG
4591void stw_phys(target_phys_addr_t addr, uint32_t val)
4592{
4593 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4594}
4595
4596void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4597{
4598 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4599}
4600
4601void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4602{
4603 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4604}
4605
aab33094 4606/* XXX: optimize */
c227f099 4607void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4608{
4609 val = tswap64(val);
71d2b725 4610 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4611}
4612
1e78bcc1
AG
4613void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4614{
4615 val = cpu_to_le64(val);
4616 cpu_physical_memory_write(addr, &val, 8);
4617}
4618
4619void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4620{
4621 val = cpu_to_be64(val);
4622 cpu_physical_memory_write(addr, &val, 8);
4623}
4624
5e2972fd 4625/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4626int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4627 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4628{
4629 int l;
c227f099 4630 target_phys_addr_t phys_addr;
9b3c35e0 4631 target_ulong page;
13eb76e0
FB
4632
4633 while (len > 0) {
4634 page = addr & TARGET_PAGE_MASK;
4635 phys_addr = cpu_get_phys_page_debug(env, page);
4636 /* if no physical page mapped, return an error */
4637 if (phys_addr == -1)
4638 return -1;
4639 l = (page + TARGET_PAGE_SIZE) - addr;
4640 if (l > len)
4641 l = len;
5e2972fd 4642 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4643 if (is_write)
4644 cpu_physical_memory_write_rom(phys_addr, buf, l);
4645 else
5e2972fd 4646 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4647 len -= l;
4648 buf += l;
4649 addr += l;
4650 }
4651 return 0;
4652}
a68fe89c 4653#endif
13eb76e0 4654
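A small usage sketch for the debug accessor above, as a gdb-stub-like caller might use it; read_guest_word() is a hypothetical helper. cpu_memory_rw_debug() returns 0 on success and -1 when no physical page is mapped at the virtual address.

/* hypothetical: fetch one 32-bit word at a guest-virtual address */
static int read_guest_word(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0 /* is_write */);
}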
2e70f6ef
PB
4655/* in deterministic execution mode, instructions doing device I/Os
4656 must be at the end of the TB */
4657void cpu_io_recompile(CPUState *env, void *retaddr)
4658{
4659 TranslationBlock *tb;
4660 uint32_t n, cflags;
4661 target_ulong pc, cs_base;
4662 uint64_t flags;
4663
4664 tb = tb_find_pc((unsigned long)retaddr);
4665 if (!tb) {
4666 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4667 retaddr);
4668 }
4669 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4670 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4671 /* Calculate how many instructions had been executed before the fault
bf20dc07 4672 occurred. */
2e70f6ef
PB
4673 n = n - env->icount_decr.u16.low;
4674 /* Generate a new TB ending on the I/O insn. */
4675 n++;
4676 /* On MIPS and SH, delay slot instructions can only be restarted if
4677 they were already the first instruction in the TB. If this is not
bf20dc07 4678 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4679 branch. */
4680#if defined(TARGET_MIPS)
4681 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4682 env->active_tc.PC -= 4;
4683 env->icount_decr.u16.low++;
4684 env->hflags &= ~MIPS_HFLAG_BMASK;
4685 }
4686#elif defined(TARGET_SH4)
4687 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4688 && n > 1) {
4689 env->pc -= 2;
4690 env->icount_decr.u16.low++;
4691 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4692 }
4693#endif
4694 /* This should never happen. */
4695 if (n > CF_COUNT_MASK)
4696 cpu_abort(env, "TB too big during recompile");
4697
4698 cflags = n | CF_LAST_IO;
4699 pc = tb->pc;
4700 cs_base = tb->cs_base;
4701 flags = tb->flags;
4702 tb_phys_invalidate(tb, -1);
4703 /* FIXME: In theory this could raise an exception. In practice
4704 we have already translated the block once so it's probably ok. */
4705 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4706 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4707 the first in the TB) then we end up generating a whole new TB and
4708 repeating the fault, which is horribly inefficient.
4709 Better would be to execute just this insn uncached, or generate a
4710 second new TB. */
4711 cpu_resume_from_signal(env, NULL);
4712}
4713
b3755a91
PB
4714#if !defined(CONFIG_USER_ONLY)
4715
055403b2 4716void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4717{
4718 int i, target_code_size, max_target_code_size;
4719 int direct_jmp_count, direct_jmp2_count, cross_page;
4720 TranslationBlock *tb;
3b46e624 4721
e3db7226
FB
4722 target_code_size = 0;
4723 max_target_code_size = 0;
4724 cross_page = 0;
4725 direct_jmp_count = 0;
4726 direct_jmp2_count = 0;
4727 for(i = 0; i < nb_tbs; i++) {
4728 tb = &tbs[i];
4729 target_code_size += tb->size;
4730 if (tb->size > max_target_code_size)
4731 max_target_code_size = tb->size;
4732 if (tb->page_addr[1] != -1)
4733 cross_page++;
4734 if (tb->tb_next_offset[0] != 0xffff) {
4735 direct_jmp_count++;
4736 if (tb->tb_next_offset[1] != 0xffff) {
4737 direct_jmp2_count++;
4738 }
4739 }
4740 }
4741 /* XXX: avoid using doubles ? */
57fec1fe 4742 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4743 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4744 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4745 cpu_fprintf(f, "TB count %d/%d\n",
4746 nb_tbs, code_gen_max_blocks);
5fafdf24 4747 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4748 nb_tbs ? target_code_size / nb_tbs : 0,
4749 max_target_code_size);
055403b2 4750 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4751 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4752 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4753 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4754 cross_page,
e3db7226
FB
4755 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4756 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4757 direct_jmp_count,
e3db7226
FB
4758 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4759 direct_jmp2_count,
4760 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4761 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4762 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4763 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4764 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4765 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4766}
4767
61382a50 4768#define MMUSUFFIX _cmmu
3917149d 4769#undef GETPC
61382a50
FB
4770#define GETPC() NULL
4771#define env cpu_single_env
b769d8fe 4772#define SOFTMMU_CODE_ACCESS
61382a50
FB
4773
4774#define SHIFT 0
4775#include "softmmu_template.h"
4776
4777#define SHIFT 1
4778#include "softmmu_template.h"
4779
4780#define SHIFT 2
4781#include "softmmu_template.h"
4782
4783#define SHIFT 3
4784#include "softmmu_template.h"
4785
4786#undef env
4787
4788#endif