]> git.proxmox.com Git - qemu.git/blame - exec.c
Version 1.0.1
[qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004 26
055403b2 27#include "qemu-common.h"
6180a181 28#include "cpu.h"
b67d9a52 29#include "tcg.h"
b3c7724c 30#include "hw/hw.h"
cc9e98cb 31#include "hw/qdev.h"
74576198 32#include "osdep.h"
7ba1e619 33#include "kvm.h"
432d268c 34#include "hw/xen.h"
29e922b6 35#include "qemu-timer.h"
62152b8a
AK
36#include "memory.h"
37#include "exec-memory.h"
53a5960a
PB
38#if defined(CONFIG_USER_ONLY)
39#include <qemu.h>
f01576f1
JL
40#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
41#include <sys/param.h>
42#if __FreeBSD_version >= 700104
43#define HAVE_KINFO_GETVMMAP
44#define sigqueue sigqueue_freebsd /* avoid redefinition */
45#include <sys/time.h>
46#include <sys/proc.h>
47#include <machine/profile.h>
48#define _KERNEL
49#include <sys/user.h>
50#undef _KERNEL
51#undef sigqueue
52#include <libutil.h>
53#endif
54#endif
432d268c
JN
55#else /* !CONFIG_USER_ONLY */
56#include "xen-mapcache.h"
6506e4f9 57#include "trace.h"
53a5960a 58#endif
54936004 59
fd6ce8f6 60//#define DEBUG_TB_INVALIDATE
66e85a21 61//#define DEBUG_FLUSH
9fa3e853 62//#define DEBUG_TLB
67d3b957 63//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
64
65/* make various TB consistency checks */
5fafdf24
TS
66//#define DEBUG_TB_CHECK
67//#define DEBUG_TLB_CHECK
fd6ce8f6 68
1196be37 69//#define DEBUG_IOPORT
db7b5426 70//#define DEBUG_SUBPAGE
1196be37 71
99773bd4
PB
72#if !defined(CONFIG_USER_ONLY)
73/* TB consistency checks only implemented for usermode emulation. */
74#undef DEBUG_TB_CHECK
75#endif
76
9fa3e853
FB
77#define SMC_BITMAP_USE_THRESHOLD 10
78
bdaf78e0 79static TranslationBlock *tbs;
24ab68ac 80static int code_gen_max_blocks;
9fa3e853 81TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 82static int nb_tbs;
eb51d102 83/* any access to the tbs or the page table must use this lock */
c227f099 84spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 85
141ac468
BS
86#if defined(__arm__) || defined(__sparc_v9__)
87/* The prologue must be reachable with a direct jump. ARM and Sparc64
88 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
89 section close to code segment. */
90#define code_gen_section \
91 __attribute__((__section__(".gen_code"))) \
92 __attribute__((aligned (32)))
f8e2af11
SW
93#elif defined(_WIN32)
94/* Maximum alignment for Win32 is 16. */
95#define code_gen_section \
96 __attribute__((aligned (16)))
d03d860b
BS
97#else
98#define code_gen_section \
99 __attribute__((aligned (32)))
100#endif
101
102uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
103static uint8_t *code_gen_buffer;
104static unsigned long code_gen_buffer_size;
26a5f13b 105/* threshold to flush the translated code buffer */
bdaf78e0 106static unsigned long code_gen_buffer_max_size;
24ab68ac 107static uint8_t *code_gen_ptr;
fd6ce8f6 108
e2eef170 109#if !defined(CONFIG_USER_ONLY)
9fa3e853 110int phys_ram_fd;
74576198 111static int in_migration;
94a6b54f 112
85d59fef 113RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };
62152b8a
AK
114
115static MemoryRegion *system_memory;
309cb471 116static MemoryRegion *system_io;
62152b8a 117
e2eef170 118#endif
9fa3e853 119
6a00d601
FB
120CPUState *first_cpu;
121/* current CPU in the current thread. It is only valid inside
122 cpu_exec() */
b3c4bbe5 123DEFINE_TLS(CPUState *,cpu_single_env);
2e70f6ef 124/* 0 = Do not count executed instructions.
bf20dc07 125 1 = Precise instruction counting.
2e70f6ef
PB
126 2 = Adaptive rate instruction counting. */
127int use_icount = 0;
6a00d601 128
54936004 129typedef struct PageDesc {
92e873b9 130 /* list of TBs intersecting this ram page */
fd6ce8f6 131 TranslationBlock *first_tb;
9fa3e853
FB
132 /* in order to optimize self modifying code, we count the number
133 of lookups we do to a given page to use a bitmap */
134 unsigned int code_write_count;
135 uint8_t *code_bitmap;
136#if defined(CONFIG_USER_ONLY)
137 unsigned long flags;
138#endif
54936004
FB
139} PageDesc;
140
41c1b1c9 141/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
142 while in user mode we want it to be based on virtual addresses. */
143#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
144#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
145# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
146#else
5cd2c5b6 147# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 148#endif
bedb69ea 149#else
5cd2c5b6 150# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 151#endif
54936004 152
5cd2c5b6
RH
153/* Size of the L2 (and L3, etc) page tables. */
154#define L2_BITS 10
54936004
FB
155#define L2_SIZE (1 << L2_BITS)
156
5cd2c5b6
RH
157/* The bits remaining after N lower levels of page tables. */
158#define P_L1_BITS_REM \
159 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
160#define V_L1_BITS_REM \
161 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
162
163/* Size of the L1 page table. Avoid silly small sizes. */
164#if P_L1_BITS_REM < 4
165#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
166#else
167#define P_L1_BITS P_L1_BITS_REM
168#endif
169
170#if V_L1_BITS_REM < 4
171#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
172#else
173#define V_L1_BITS V_L1_BITS_REM
174#endif
175
176#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
177#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
178
179#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
180#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
181
83fb7adf 182unsigned long qemu_real_host_page_size;
83fb7adf
FB
183unsigned long qemu_host_page_size;
184unsigned long qemu_host_page_mask;
54936004 185
5cd2c5b6
RH
186/* This is a multi-level map on the virtual address space.
187 The bottom level has pointers to PageDesc. */
188static void *l1_map[V_L1_SIZE];
54936004 189
e2eef170 190#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
191typedef struct PhysPageDesc {
192 /* offset in host memory of the page + io_index in the low bits */
193 ram_addr_t phys_offset;
194 ram_addr_t region_offset;
195} PhysPageDesc;
196
5cd2c5b6
RH
197/* This is a multi-level map on the physical address space.
198 The bottom level has pointers to PhysPageDesc. */
199static void *l1_phys_map[P_L1_SIZE];
6d9a1304 200
e2eef170 201static void io_mem_init(void);
62152b8a 202static void memory_map_init(void);
e2eef170 203
33417e70 204/* io memory support */
33417e70
FB
205CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
206CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 207void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 208static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
209static int io_mem_watch;
210#endif
33417e70 211
34865134 212/* log support */
1e8b27ca
JR
213#ifdef WIN32
214static const char *logfilename = "qemu.log";
215#else
d9b630fd 216static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 217#endif
34865134
FB
218FILE *logfile;
219int loglevel;
e735b91c 220static int log_append = 0;
34865134 221
e3db7226 222/* statistics */
b3755a91 223#if !defined(CONFIG_USER_ONLY)
e3db7226 224static int tlb_flush_count;
b3755a91 225#endif
e3db7226
FB
226static int tb_flush_count;
227static int tb_phys_invalidate_count;
228
7cb69cae
FB
229#ifdef _WIN32
230static void map_exec(void *addr, long size)
231{
232 DWORD old_protect;
233 VirtualProtect(addr, size,
234 PAGE_EXECUTE_READWRITE, &old_protect);
235
236}
237#else
238static void map_exec(void *addr, long size)
239{
4369415f 240 unsigned long start, end, page_size;
7cb69cae 241
4369415f 242 page_size = getpagesize();
7cb69cae 243 start = (unsigned long)addr;
4369415f 244 start &= ~(page_size - 1);
7cb69cae
FB
245
246 end = (unsigned long)addr + size;
4369415f
FB
247 end += page_size - 1;
248 end &= ~(page_size - 1);
7cb69cae
FB
249
250 mprotect((void *)start, end - start,
251 PROT_READ | PROT_WRITE | PROT_EXEC);
252}
253#endif
254
b346ff46 255static void page_init(void)
54936004 256{
83fb7adf 257 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 258 TARGET_PAGE_SIZE */
c2b48b69
AL
259#ifdef _WIN32
260 {
261 SYSTEM_INFO system_info;
262
263 GetSystemInfo(&system_info);
264 qemu_real_host_page_size = system_info.dwPageSize;
265 }
266#else
267 qemu_real_host_page_size = getpagesize();
268#endif
83fb7adf
FB
269 if (qemu_host_page_size == 0)
270 qemu_host_page_size = qemu_real_host_page_size;
271 if (qemu_host_page_size < TARGET_PAGE_SIZE)
272 qemu_host_page_size = TARGET_PAGE_SIZE;
83fb7adf 273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b 274
2e9a5713 275#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
50a9569b 276 {
f01576f1
JL
277#ifdef HAVE_KINFO_GETVMMAP
278 struct kinfo_vmentry *freep;
279 int i, cnt;
280
281 freep = kinfo_getvmmap(getpid(), &cnt);
282 if (freep) {
283 mmap_lock();
284 for (i = 0; i < cnt; i++) {
285 unsigned long startaddr, endaddr;
286
287 startaddr = freep[i].kve_start;
288 endaddr = freep[i].kve_end;
289 if (h2g_valid(startaddr)) {
290 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
291
292 if (h2g_valid(endaddr)) {
293 endaddr = h2g(endaddr);
fd436907 294 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
295 } else {
296#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
297 endaddr = ~0ul;
fd436907 298 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
f01576f1
JL
299#endif
300 }
301 }
302 }
303 free(freep);
304 mmap_unlock();
305 }
306#else
50a9569b 307 FILE *f;
50a9569b 308
0776590d 309 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 310
fd436907 311 f = fopen("/compat/linux/proc/self/maps", "r");
50a9569b 312 if (f) {
5cd2c5b6
RH
313 mmap_lock();
314
50a9569b 315 do {
5cd2c5b6
RH
316 unsigned long startaddr, endaddr;
317 int n;
318
319 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
320
321 if (n == 2 && h2g_valid(startaddr)) {
322 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
323
324 if (h2g_valid(endaddr)) {
325 endaddr = h2g(endaddr);
326 } else {
327 endaddr = ~0ul;
328 }
329 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
330 }
331 } while (!feof(f));
5cd2c5b6 332
50a9569b 333 fclose(f);
5cd2c5b6 334 mmap_unlock();
50a9569b 335 }
f01576f1 336#endif
50a9569b
AZ
337 }
338#endif
54936004
FB
339}
340
41c1b1c9 341static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 342{
41c1b1c9
PB
343 PageDesc *pd;
344 void **lp;
345 int i;
346
5cd2c5b6 347#if defined(CONFIG_USER_ONLY)
7267c094 348 /* We can't use g_malloc because it may recurse into a locked mutex. */
5cd2c5b6
RH
349# define ALLOC(P, SIZE) \
350 do { \
351 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
352 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
5cd2c5b6
RH
353 } while (0)
354#else
355# define ALLOC(P, SIZE) \
7267c094 356 do { P = g_malloc0(SIZE); } while (0)
17e2377a 357#endif
434929bf 358
5cd2c5b6
RH
359 /* Level 1. Always allocated. */
360 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
361
362 /* Level 2..N-1. */
363 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
364 void **p = *lp;
365
366 if (p == NULL) {
367 if (!alloc) {
368 return NULL;
369 }
370 ALLOC(p, sizeof(void *) * L2_SIZE);
371 *lp = p;
17e2377a 372 }
5cd2c5b6
RH
373
374 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
375 }
376
377 pd = *lp;
378 if (pd == NULL) {
379 if (!alloc) {
380 return NULL;
381 }
382 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
383 *lp = pd;
54936004 384 }
5cd2c5b6
RH
385
386#undef ALLOC
5cd2c5b6
RH
387
388 return pd + (index & (L2_SIZE - 1));
54936004
FB
389}
390
41c1b1c9 391static inline PageDesc *page_find(tb_page_addr_t index)
54936004 392{
5cd2c5b6 393 return page_find_alloc(index, 0);
fd6ce8f6
FB
394}
395
6d9a1304 396#if !defined(CONFIG_USER_ONLY)
c227f099 397static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 398{
e3f4e2a4 399 PhysPageDesc *pd;
5cd2c5b6
RH
400 void **lp;
401 int i;
92e873b9 402
5cd2c5b6
RH
403 /* Level 1. Always allocated. */
404 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 405
5cd2c5b6
RH
406 /* Level 2..N-1. */
407 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
408 void **p = *lp;
409 if (p == NULL) {
410 if (!alloc) {
411 return NULL;
412 }
7267c094 413 *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
5cd2c5b6
RH
414 }
415 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 416 }
5cd2c5b6 417
e3f4e2a4 418 pd = *lp;
5cd2c5b6 419 if (pd == NULL) {
e3f4e2a4 420 int i;
5cd2c5b6
RH
421
422 if (!alloc) {
108c49b8 423 return NULL;
5cd2c5b6
RH
424 }
425
7267c094 426 *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);
5cd2c5b6 427
67c4d23c 428 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6
RH
429 pd[i].phys_offset = IO_MEM_UNASSIGNED;
430 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
67c4d23c 431 }
92e873b9 432 }
5cd2c5b6
RH
433
434 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
435}
436
c227f099 437static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 438{
108c49b8 439 return phys_page_find_alloc(index, 0);
92e873b9
FB
440}
441
c227f099
AL
442static void tlb_protect_code(ram_addr_t ram_addr);
443static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 444 target_ulong vaddr);
c8a706fe
PB
445#define mmap_lock() do { } while(0)
446#define mmap_unlock() do { } while(0)
9fa3e853 447#endif
fd6ce8f6 448
4369415f
FB
449#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
450
451#if defined(CONFIG_USER_ONLY)
ccbb4d44 452/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
453 user mode. It will change when a dedicated libc will be used */
454#define USE_STATIC_CODE_GEN_BUFFER
455#endif
456
457#ifdef USE_STATIC_CODE_GEN_BUFFER
ebf50fb3
AJ
458static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
4369415f
FB
460#endif
461
8fcd3692 462static void code_gen_alloc(unsigned long tb_size)
26a5f13b 463{
4369415f
FB
464#ifdef USE_STATIC_CODE_GEN_BUFFER
465 code_gen_buffer = static_code_gen_buffer;
466 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
467 map_exec(code_gen_buffer, code_gen_buffer_size);
468#else
26a5f13b
FB
469 code_gen_buffer_size = tb_size;
470 if (code_gen_buffer_size == 0) {
4369415f 471#if defined(CONFIG_USER_ONLY)
4369415f
FB
472 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
473#else
ccbb4d44 474 /* XXX: needs adjustments */
94a6b54f 475 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 476#endif
26a5f13b
FB
477 }
478 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
479 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
480 /* The code gen buffer location may have constraints depending on
481 the host cpu and OS */
482#if defined(__linux__)
483 {
484 int flags;
141ac468
BS
485 void *start = NULL;
486
26a5f13b
FB
487 flags = MAP_PRIVATE | MAP_ANONYMOUS;
488#if defined(__x86_64__)
489 flags |= MAP_32BIT;
490 /* Cannot map more than that */
491 if (code_gen_buffer_size > (800 * 1024 * 1024))
492 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
493#elif defined(__sparc_v9__)
494 // Map the buffer below 2G, so we can use direct calls and branches
495 flags |= MAP_FIXED;
496 start = (void *) 0x60000000UL;
497 if (code_gen_buffer_size > (512 * 1024 * 1024))
498 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 499#elif defined(__arm__)
63d41246 500 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
501 flags |= MAP_FIXED;
502 start = (void *) 0x01000000UL;
503 if (code_gen_buffer_size > 16 * 1024 * 1024)
504 code_gen_buffer_size = 16 * 1024 * 1024;
eba0b893
RH
505#elif defined(__s390x__)
506 /* Map the buffer so that we can use direct calls and branches. */
507 /* We have a +- 4GB range on the branches; leave some slop. */
508 if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
509 code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
510 }
511 start = (void *)0x90000000UL;
26a5f13b 512#endif
141ac468
BS
513 code_gen_buffer = mmap(start, code_gen_buffer_size,
514 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
515 flags, -1, 0);
516 if (code_gen_buffer == MAP_FAILED) {
517 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
518 exit(1);
519 }
520 }
cbb608a5 521#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
9f4b09a4
TN
522 || defined(__DragonFly__) || defined(__OpenBSD__) \
523 || defined(__NetBSD__)
06e67a82
AL
524 {
525 int flags;
526 void *addr = NULL;
527 flags = MAP_PRIVATE | MAP_ANONYMOUS;
528#if defined(__x86_64__)
529 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
530 * 0x40000000 is free */
531 flags |= MAP_FIXED;
532 addr = (void *)0x40000000;
533 /* Cannot map more than that */
534 if (code_gen_buffer_size > (800 * 1024 * 1024))
535 code_gen_buffer_size = (800 * 1024 * 1024);
4cd31ad2
BS
536#elif defined(__sparc_v9__)
537 // Map the buffer below 2G, so we can use direct calls and branches
538 flags |= MAP_FIXED;
539 addr = (void *) 0x60000000UL;
540 if (code_gen_buffer_size > (512 * 1024 * 1024)) {
541 code_gen_buffer_size = (512 * 1024 * 1024);
542 }
06e67a82
AL
543#endif
544 code_gen_buffer = mmap(addr, code_gen_buffer_size,
545 PROT_WRITE | PROT_READ | PROT_EXEC,
546 flags, -1, 0);
547 if (code_gen_buffer == MAP_FAILED) {
548 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
549 exit(1);
550 }
551 }
26a5f13b 552#else
7267c094 553 code_gen_buffer = g_malloc(code_gen_buffer_size);
26a5f13b
FB
554 map_exec(code_gen_buffer, code_gen_buffer_size);
555#endif
4369415f 556#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b 557 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
a884da8a
PM
558 code_gen_buffer_max_size = code_gen_buffer_size -
559 (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
26a5f13b 560 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
7267c094 561 tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
26a5f13b
FB
562}
563
564/* Must be called before using the QEMU cpus. 'tb_size' is the size
565 (in bytes) allocated to the translation buffer. Zero means default
566 size. */
d5ab9713 567void tcg_exec_init(unsigned long tb_size)
26a5f13b 568{
26a5f13b
FB
569 cpu_gen_init();
570 code_gen_alloc(tb_size);
571 code_gen_ptr = code_gen_buffer;
4369415f 572 page_init();
9002ec79
RH
573#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
574 /* There's no guest base to take into account, so go ahead and
575 initialize the prologue now. */
576 tcg_prologue_init(&tcg_ctx);
577#endif
26a5f13b
FB
578}
579
d5ab9713
JK
580bool tcg_enabled(void)
581{
582 return code_gen_buffer != NULL;
583}
584
585void cpu_exec_init_all(void)
586{
587#if !defined(CONFIG_USER_ONLY)
588 memory_map_init();
589 io_mem_init();
590#endif
591}
592
9656f324
PB
593#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
594
e59fb374 595static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
596{
597 CPUState *env = opaque;
9656f324 598
3098dba0
AJ
599 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
600 version_id is increased. */
601 env->interrupt_request &= ~0x01;
9656f324
PB
602 tlb_flush(env, 1);
603
604 return 0;
605}
e7f4eff7
JQ
606
607static const VMStateDescription vmstate_cpu_common = {
608 .name = "cpu_common",
609 .version_id = 1,
610 .minimum_version_id = 1,
611 .minimum_version_id_old = 1,
e7f4eff7
JQ
612 .post_load = cpu_common_post_load,
613 .fields = (VMStateField []) {
614 VMSTATE_UINT32(halted, CPUState),
615 VMSTATE_UINT32(interrupt_request, CPUState),
616 VMSTATE_END_OF_LIST()
617 }
618};
9656f324
PB
619#endif
620
950f1472
GC
621CPUState *qemu_get_cpu(int cpu)
622{
623 CPUState *env = first_cpu;
624
625 while (env) {
626 if (env->cpu_index == cpu)
627 break;
628 env = env->next_cpu;
629 }
630
631 return env;
632}
633
6a00d601 634void cpu_exec_init(CPUState *env)
fd6ce8f6 635{
6a00d601
FB
636 CPUState **penv;
637 int cpu_index;
638
c2764719
PB
639#if defined(CONFIG_USER_ONLY)
640 cpu_list_lock();
641#endif
6a00d601
FB
642 env->next_cpu = NULL;
643 penv = &first_cpu;
644 cpu_index = 0;
645 while (*penv != NULL) {
1e9fa730 646 penv = &(*penv)->next_cpu;
6a00d601
FB
647 cpu_index++;
648 }
649 env->cpu_index = cpu_index;
268a362c 650 env->numa_node = 0;
72cf2d4f
BS
651 QTAILQ_INIT(&env->breakpoints);
652 QTAILQ_INIT(&env->watchpoints);
dc7a09cf
JK
653#ifndef CONFIG_USER_ONLY
654 env->thread_id = qemu_get_thread_id();
655#endif
6a00d601 656 *penv = env;
c2764719
PB
657#if defined(CONFIG_USER_ONLY)
658 cpu_list_unlock();
659#endif
b3c7724c 660#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
0be71e32
AW
661 vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
662 register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
b3c7724c
PB
663 cpu_save, cpu_load, env);
664#endif
fd6ce8f6
FB
665}
666
d1a1eb74
TG
667/* Allocate a new translation block. Flush the translation buffer if
668 too many translation blocks or too much generated code. */
669static TranslationBlock *tb_alloc(target_ulong pc)
670{
671 TranslationBlock *tb;
672
673 if (nb_tbs >= code_gen_max_blocks ||
674 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
675 return NULL;
676 tb = &tbs[nb_tbs++];
677 tb->pc = pc;
678 tb->cflags = 0;
679 return tb;
680}
681
682void tb_free(TranslationBlock *tb)
683{
684 /* In practice this is mostly used for single use temporary TB
685 Ignore the hard cases and just back up if this TB happens to
686 be the last one generated. */
687 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
688 code_gen_ptr = tb->tc_ptr;
689 nb_tbs--;
690 }
691}
692
9fa3e853
FB
693static inline void invalidate_page_bitmap(PageDesc *p)
694{
695 if (p->code_bitmap) {
7267c094 696 g_free(p->code_bitmap);
9fa3e853
FB
697 p->code_bitmap = NULL;
698 }
699 p->code_write_count = 0;
700}
701
5cd2c5b6
RH
702/* Set to NULL all the 'first_tb' fields in all PageDescs. */
703
704static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 705{
5cd2c5b6 706 int i;
fd6ce8f6 707
5cd2c5b6
RH
708 if (*lp == NULL) {
709 return;
710 }
711 if (level == 0) {
712 PageDesc *pd = *lp;
7296abac 713 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
714 pd[i].first_tb = NULL;
715 invalidate_page_bitmap(pd + i);
fd6ce8f6 716 }
5cd2c5b6
RH
717 } else {
718 void **pp = *lp;
7296abac 719 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
720 page_flush_tb_1 (level - 1, pp + i);
721 }
722 }
723}
724
725static void page_flush_tb(void)
726{
727 int i;
728 for (i = 0; i < V_L1_SIZE; i++) {
729 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
730 }
731}
732
733/* flush all the translation blocks */
d4e8164f 734/* XXX: tb_flush is currently not thread safe */
6a00d601 735void tb_flush(CPUState *env1)
fd6ce8f6 736{
6a00d601 737 CPUState *env;
0124311e 738#if defined(DEBUG_FLUSH)
ab3d1727
BS
739 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
740 (unsigned long)(code_gen_ptr - code_gen_buffer),
741 nb_tbs, nb_tbs > 0 ?
742 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 743#endif
26a5f13b 744 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
745 cpu_abort(env1, "Internal error: code buffer overflow\n");
746
fd6ce8f6 747 nb_tbs = 0;
3b46e624 748
6a00d601
FB
749 for(env = first_cpu; env != NULL; env = env->next_cpu) {
750 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
751 }
9fa3e853 752
8a8a608f 753 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 754 page_flush_tb();
9fa3e853 755
fd6ce8f6 756 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
757 /* XXX: flush processor icache at this point if cache flush is
758 expensive */
e3db7226 759 tb_flush_count++;
fd6ce8f6
FB
760}
761
762#ifdef DEBUG_TB_CHECK
763
bc98a7ef 764static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
765{
766 TranslationBlock *tb;
767 int i;
768 address &= TARGET_PAGE_MASK;
99773bd4
PB
769 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
770 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
771 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
772 address >= tb->pc + tb->size)) {
0bf9e31a
BS
773 printf("ERROR invalidate: address=" TARGET_FMT_lx
774 " PC=%08lx size=%04x\n",
99773bd4 775 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
776 }
777 }
778 }
779}
780
781/* verify that all the pages have correct rights for code */
782static void tb_page_check(void)
783{
784 TranslationBlock *tb;
785 int i, flags1, flags2;
3b46e624 786
99773bd4
PB
787 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
788 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
789 flags1 = page_get_flags(tb->pc);
790 flags2 = page_get_flags(tb->pc + tb->size - 1);
791 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
792 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 793 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
794 }
795 }
796 }
797}
798
799#endif
800
801/* invalidate one TB */
802static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
803 int next_offset)
804{
805 TranslationBlock *tb1;
806 for(;;) {
807 tb1 = *ptb;
808 if (tb1 == tb) {
809 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
810 break;
811 }
812 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
813 }
814}
815
9fa3e853
FB
816static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
817{
818 TranslationBlock *tb1;
819 unsigned int n1;
820
821 for(;;) {
822 tb1 = *ptb;
823 n1 = (long)tb1 & 3;
824 tb1 = (TranslationBlock *)((long)tb1 & ~3);
825 if (tb1 == tb) {
826 *ptb = tb1->page_next[n1];
827 break;
828 }
829 ptb = &tb1->page_next[n1];
830 }
831}
832
d4e8164f
FB
833static inline void tb_jmp_remove(TranslationBlock *tb, int n)
834{
835 TranslationBlock *tb1, **ptb;
836 unsigned int n1;
837
838 ptb = &tb->jmp_next[n];
839 tb1 = *ptb;
840 if (tb1) {
841 /* find tb(n) in circular list */
842 for(;;) {
843 tb1 = *ptb;
844 n1 = (long)tb1 & 3;
845 tb1 = (TranslationBlock *)((long)tb1 & ~3);
846 if (n1 == n && tb1 == tb)
847 break;
848 if (n1 == 2) {
849 ptb = &tb1->jmp_first;
850 } else {
851 ptb = &tb1->jmp_next[n1];
852 }
853 }
854 /* now we can suppress tb(n) from the list */
855 *ptb = tb->jmp_next[n];
856
857 tb->jmp_next[n] = NULL;
858 }
859}
860
861/* reset the jump entry 'n' of a TB so that it is not chained to
862 another TB */
863static inline void tb_reset_jump(TranslationBlock *tb, int n)
864{
865 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
866}
867
41c1b1c9 868void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 869{
6a00d601 870 CPUState *env;
8a40a180 871 PageDesc *p;
d4e8164f 872 unsigned int h, n1;
41c1b1c9 873 tb_page_addr_t phys_pc;
8a40a180 874 TranslationBlock *tb1, *tb2;
3b46e624 875
8a40a180
FB
876 /* remove the TB from the hash list */
877 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
878 h = tb_phys_hash_func(phys_pc);
5fafdf24 879 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
880 offsetof(TranslationBlock, phys_hash_next));
881
882 /* remove the TB from the page list */
883 if (tb->page_addr[0] != page_addr) {
884 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
885 tb_page_remove(&p->first_tb, tb);
886 invalidate_page_bitmap(p);
887 }
888 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
889 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
890 tb_page_remove(&p->first_tb, tb);
891 invalidate_page_bitmap(p);
892 }
893
36bdbe54 894 tb_invalidated_flag = 1;
59817ccb 895
fd6ce8f6 896 /* remove the TB from the hash list */
8a40a180 897 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
898 for(env = first_cpu; env != NULL; env = env->next_cpu) {
899 if (env->tb_jmp_cache[h] == tb)
900 env->tb_jmp_cache[h] = NULL;
901 }
d4e8164f
FB
902
903 /* suppress this TB from the two jump lists */
904 tb_jmp_remove(tb, 0);
905 tb_jmp_remove(tb, 1);
906
907 /* suppress any remaining jumps to this TB */
908 tb1 = tb->jmp_first;
909 for(;;) {
910 n1 = (long)tb1 & 3;
911 if (n1 == 2)
912 break;
913 tb1 = (TranslationBlock *)((long)tb1 & ~3);
914 tb2 = tb1->jmp_next[n1];
915 tb_reset_jump(tb1, n1);
916 tb1->jmp_next[n1] = NULL;
917 tb1 = tb2;
918 }
919 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 920
e3db7226 921 tb_phys_invalidate_count++;
9fa3e853
FB
922}
923
924static inline void set_bits(uint8_t *tab, int start, int len)
925{
926 int end, mask, end1;
927
928 end = start + len;
929 tab += start >> 3;
930 mask = 0xff << (start & 7);
931 if ((start & ~7) == (end & ~7)) {
932 if (start < end) {
933 mask &= ~(0xff << (end & 7));
934 *tab |= mask;
935 }
936 } else {
937 *tab++ |= mask;
938 start = (start + 8) & ~7;
939 end1 = end & ~7;
940 while (start < end1) {
941 *tab++ = 0xff;
942 start += 8;
943 }
944 if (start < end) {
945 mask = ~(0xff << (end & 7));
946 *tab |= mask;
947 }
948 }
949}
950
951static void build_page_bitmap(PageDesc *p)
952{
953 int n, tb_start, tb_end;
954 TranslationBlock *tb;
3b46e624 955
7267c094 956 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
957
958 tb = p->first_tb;
959 while (tb != NULL) {
960 n = (long)tb & 3;
961 tb = (TranslationBlock *)((long)tb & ~3);
962 /* NOTE: this is subtle as a TB may span two physical pages */
963 if (n == 0) {
964 /* NOTE: tb_end may be after the end of the page, but
965 it is not a problem */
966 tb_start = tb->pc & ~TARGET_PAGE_MASK;
967 tb_end = tb_start + tb->size;
968 if (tb_end > TARGET_PAGE_SIZE)
969 tb_end = TARGET_PAGE_SIZE;
970 } else {
971 tb_start = 0;
972 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
973 }
974 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
975 tb = tb->page_next[n];
976 }
977}
978
2e70f6ef
PB
979TranslationBlock *tb_gen_code(CPUState *env,
980 target_ulong pc, target_ulong cs_base,
981 int flags, int cflags)
d720b93d
FB
982{
983 TranslationBlock *tb;
984 uint8_t *tc_ptr;
41c1b1c9
PB
985 tb_page_addr_t phys_pc, phys_page2;
986 target_ulong virt_page2;
d720b93d
FB
987 int code_gen_size;
988
41c1b1c9 989 phys_pc = get_page_addr_code(env, pc);
c27004ec 990 tb = tb_alloc(pc);
d720b93d
FB
991 if (!tb) {
992 /* flush must be done */
993 tb_flush(env);
994 /* cannot fail at this point */
c27004ec 995 tb = tb_alloc(pc);
2e70f6ef
PB
996 /* Don't forget to invalidate previous TB info. */
997 tb_invalidated_flag = 1;
d720b93d
FB
998 }
999 tc_ptr = code_gen_ptr;
1000 tb->tc_ptr = tc_ptr;
1001 tb->cs_base = cs_base;
1002 tb->flags = flags;
1003 tb->cflags = cflags;
d07bde88 1004 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 1005 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 1006
d720b93d 1007 /* check next page if needed */
c27004ec 1008 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 1009 phys_page2 = -1;
c27004ec 1010 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 1011 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 1012 }
41c1b1c9 1013 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 1014 return tb;
d720b93d 1015}
3b46e624 1016
9fa3e853
FB
1017/* invalidate all TBs which intersect with the target physical page
1018 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
1019 the same physical page. 'is_cpu_write_access' should be true if called
1020 from a real cpu write access: the virtual CPU will exit the current
1021 TB if code is modified inside this TB. */
41c1b1c9 1022void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
1023 int is_cpu_write_access)
1024{
6b917547 1025 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 1026 CPUState *env = cpu_single_env;
41c1b1c9 1027 tb_page_addr_t tb_start, tb_end;
6b917547
AL
1028 PageDesc *p;
1029 int n;
1030#ifdef TARGET_HAS_PRECISE_SMC
1031 int current_tb_not_found = is_cpu_write_access;
1032 TranslationBlock *current_tb = NULL;
1033 int current_tb_modified = 0;
1034 target_ulong current_pc = 0;
1035 target_ulong current_cs_base = 0;
1036 int current_flags = 0;
1037#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1038
1039 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1040 if (!p)
9fa3e853 1041 return;
5fafdf24 1042 if (!p->code_bitmap &&
d720b93d
FB
1043 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1044 is_cpu_write_access) {
9fa3e853
FB
1045 /* build code bitmap */
1046 build_page_bitmap(p);
1047 }
1048
1049 /* we remove all the TBs in the range [start, end[ */
1050 /* XXX: see if in some cases it could be faster to invalidate all the code */
1051 tb = p->first_tb;
1052 while (tb != NULL) {
1053 n = (long)tb & 3;
1054 tb = (TranslationBlock *)((long)tb & ~3);
1055 tb_next = tb->page_next[n];
1056 /* NOTE: this is subtle as a TB may span two physical pages */
1057 if (n == 0) {
1058 /* NOTE: tb_end may be after the end of the page, but
1059 it is not a problem */
1060 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1061 tb_end = tb_start + tb->size;
1062 } else {
1063 tb_start = tb->page_addr[1];
1064 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1065 }
1066 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
1067#ifdef TARGET_HAS_PRECISE_SMC
1068 if (current_tb_not_found) {
1069 current_tb_not_found = 0;
1070 current_tb = NULL;
2e70f6ef 1071 if (env->mem_io_pc) {
d720b93d 1072 /* now we have a real cpu fault */
2e70f6ef 1073 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1074 }
1075 }
1076 if (current_tb == tb &&
2e70f6ef 1077 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1078 /* If we are modifying the current TB, we must stop
1079 its execution. We could be more precise by checking
1080 that the modification is after the current PC, but it
1081 would require a specialized function to partially
1082 restore the CPU state */
3b46e624 1083
d720b93d 1084 current_tb_modified = 1;
618ba8e6 1085 cpu_restore_state(current_tb, env, env->mem_io_pc);
6b917547
AL
1086 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1087 &current_flags);
d720b93d
FB
1088 }
1089#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1090 /* we need to do that to handle the case where a signal
1091 occurs while doing tb_phys_invalidate() */
1092 saved_tb = NULL;
1093 if (env) {
1094 saved_tb = env->current_tb;
1095 env->current_tb = NULL;
1096 }
9fa3e853 1097 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1098 if (env) {
1099 env->current_tb = saved_tb;
1100 if (env->interrupt_request && env->current_tb)
1101 cpu_interrupt(env, env->interrupt_request);
1102 }
9fa3e853
FB
1103 }
1104 tb = tb_next;
1105 }
1106#if !defined(CONFIG_USER_ONLY)
1107 /* if no code remaining, no need to continue to use slow writes */
1108 if (!p->first_tb) {
1109 invalidate_page_bitmap(p);
d720b93d 1110 if (is_cpu_write_access) {
2e70f6ef 1111 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1112 }
1113 }
1114#endif
1115#ifdef TARGET_HAS_PRECISE_SMC
1116 if (current_tb_modified) {
1117 /* we generate a block containing just the instruction
1118 modifying the memory. It will ensure that it cannot modify
1119 itself */
ea1c1802 1120 env->current_tb = NULL;
2e70f6ef 1121 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1122 cpu_resume_from_signal(env, NULL);
9fa3e853 1123 }
fd6ce8f6 1124#endif
9fa3e853 1125}
fd6ce8f6 1126
9fa3e853 1127/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1128static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1129{
1130 PageDesc *p;
1131 int offset, b;
59817ccb 1132#if 0
a4193c8a 1133 if (1) {
93fcfe39
AL
1134 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1135 cpu_single_env->mem_io_vaddr, len,
1136 cpu_single_env->eip,
1137 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1138 }
1139#endif
9fa3e853 1140 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1141 if (!p)
9fa3e853
FB
1142 return;
1143 if (p->code_bitmap) {
1144 offset = start & ~TARGET_PAGE_MASK;
1145 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1146 if (b & ((1 << len) - 1))
1147 goto do_invalidate;
1148 } else {
1149 do_invalidate:
d720b93d 1150 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1151 }
1152}
1153
9fa3e853 1154#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1155static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1156 unsigned long pc, void *puc)
9fa3e853 1157{
6b917547 1158 TranslationBlock *tb;
9fa3e853 1159 PageDesc *p;
6b917547 1160 int n;
d720b93d 1161#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1162 TranslationBlock *current_tb = NULL;
d720b93d 1163 CPUState *env = cpu_single_env;
6b917547
AL
1164 int current_tb_modified = 0;
1165 target_ulong current_pc = 0;
1166 target_ulong current_cs_base = 0;
1167 int current_flags = 0;
d720b93d 1168#endif
9fa3e853
FB
1169
1170 addr &= TARGET_PAGE_MASK;
1171 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1172 if (!p)
9fa3e853
FB
1173 return;
1174 tb = p->first_tb;
d720b93d
FB
1175#ifdef TARGET_HAS_PRECISE_SMC
1176 if (tb && pc != 0) {
1177 current_tb = tb_find_pc(pc);
1178 }
1179#endif
9fa3e853
FB
1180 while (tb != NULL) {
1181 n = (long)tb & 3;
1182 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1183#ifdef TARGET_HAS_PRECISE_SMC
1184 if (current_tb == tb &&
2e70f6ef 1185 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1186 /* If we are modifying the current TB, we must stop
1187 its execution. We could be more precise by checking
1188 that the modification is after the current PC, but it
1189 would require a specialized function to partially
1190 restore the CPU state */
3b46e624 1191
d720b93d 1192 current_tb_modified = 1;
618ba8e6 1193 cpu_restore_state(current_tb, env, pc);
6b917547
AL
1194 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1195 &current_flags);
d720b93d
FB
1196 }
1197#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1198 tb_phys_invalidate(tb, addr);
1199 tb = tb->page_next[n];
1200 }
fd6ce8f6 1201 p->first_tb = NULL;
d720b93d
FB
1202#ifdef TARGET_HAS_PRECISE_SMC
1203 if (current_tb_modified) {
1204 /* we generate a block containing just the instruction
1205 modifying the memory. It will ensure that it cannot modify
1206 itself */
ea1c1802 1207 env->current_tb = NULL;
2e70f6ef 1208 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1209 cpu_resume_from_signal(env, puc);
1210 }
1211#endif
fd6ce8f6 1212}
9fa3e853 1213#endif
fd6ce8f6
FB
1214
1215/* add the tb in the target page and protect it if necessary */
5fafdf24 1216static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1217 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1218{
1219 PageDesc *p;
4429ab44
JQ
1220#ifndef CONFIG_USER_ONLY
1221 bool page_already_protected;
1222#endif
9fa3e853
FB
1223
1224 tb->page_addr[n] = page_addr;
5cd2c5b6 1225 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853 1226 tb->page_next[n] = p->first_tb;
4429ab44
JQ
1227#ifndef CONFIG_USER_ONLY
1228 page_already_protected = p->first_tb != NULL;
1229#endif
9fa3e853
FB
1230 p->first_tb = (TranslationBlock *)((long)tb | n);
1231 invalidate_page_bitmap(p);
fd6ce8f6 1232
107db443 1233#if defined(TARGET_HAS_SMC) || 1
d720b93d 1234
9fa3e853 1235#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1236 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1237 target_ulong addr;
1238 PageDesc *p2;
9fa3e853
FB
1239 int prot;
1240
fd6ce8f6
FB
1241 /* force the host page as non writable (writes will have a
1242 page fault + mprotect overhead) */
53a5960a 1243 page_addr &= qemu_host_page_mask;
fd6ce8f6 1244 prot = 0;
53a5960a
PB
1245 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1246 addr += TARGET_PAGE_SIZE) {
1247
1248 p2 = page_find (addr >> TARGET_PAGE_BITS);
1249 if (!p2)
1250 continue;
1251 prot |= p2->flags;
1252 p2->flags &= ~PAGE_WRITE;
53a5960a 1253 }
5fafdf24 1254 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1255 (prot & PAGE_BITS) & ~PAGE_WRITE);
1256#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1257 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1258 page_addr);
fd6ce8f6 1259#endif
fd6ce8f6 1260 }
9fa3e853
FB
1261#else
1262 /* if some code is already present, then the pages are already
1263 protected. So we handle the case where only the first TB is
1264 allocated in a physical page */
4429ab44 1265 if (!page_already_protected) {
6a00d601 1266 tlb_protect_code(page_addr);
9fa3e853
FB
1267 }
1268#endif
d720b93d
FB
1269
1270#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1271}
1272
9fa3e853
FB
1273/* add a new TB and link it to the physical page tables. phys_page2 is
1274 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1275void tb_link_page(TranslationBlock *tb,
1276 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1277{
9fa3e853
FB
1278 unsigned int h;
1279 TranslationBlock **ptb;
1280
c8a706fe
PB
1281 /* Grab the mmap lock to stop another thread invalidating this TB
1282 before we are done. */
1283 mmap_lock();
9fa3e853
FB
1284 /* add in the physical hash table */
1285 h = tb_phys_hash_func(phys_pc);
1286 ptb = &tb_phys_hash[h];
1287 tb->phys_hash_next = *ptb;
1288 *ptb = tb;
fd6ce8f6
FB
1289
1290 /* add in the page list */
9fa3e853
FB
1291 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1292 if (phys_page2 != -1)
1293 tb_alloc_page(tb, 1, phys_page2);
1294 else
1295 tb->page_addr[1] = -1;
9fa3e853 1296
d4e8164f
FB
1297 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1298 tb->jmp_next[0] = NULL;
1299 tb->jmp_next[1] = NULL;
1300
1301 /* init original jump addresses */
1302 if (tb->tb_next_offset[0] != 0xffff)
1303 tb_reset_jump(tb, 0);
1304 if (tb->tb_next_offset[1] != 0xffff)
1305 tb_reset_jump(tb, 1);
8a40a180
FB
1306
1307#ifdef DEBUG_TB_CHECK
1308 tb_page_check();
1309#endif
c8a706fe 1310 mmap_unlock();
fd6ce8f6
FB
1311}
1312
9fa3e853
FB
1313/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1314 tb[1].tc_ptr. Return NULL if not found */
1315TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1316{
9fa3e853
FB
1317 int m_min, m_max, m;
1318 unsigned long v;
1319 TranslationBlock *tb;
a513fe19
FB
1320
1321 if (nb_tbs <= 0)
1322 return NULL;
1323 if (tc_ptr < (unsigned long)code_gen_buffer ||
1324 tc_ptr >= (unsigned long)code_gen_ptr)
1325 return NULL;
1326 /* binary search (cf Knuth) */
1327 m_min = 0;
1328 m_max = nb_tbs - 1;
1329 while (m_min <= m_max) {
1330 m = (m_min + m_max) >> 1;
1331 tb = &tbs[m];
1332 v = (unsigned long)tb->tc_ptr;
1333 if (v == tc_ptr)
1334 return tb;
1335 else if (tc_ptr < v) {
1336 m_max = m - 1;
1337 } else {
1338 m_min = m + 1;
1339 }
5fafdf24 1340 }
a513fe19
FB
1341 return &tbs[m_max];
1342}
7501267e 1343
ea041c0e
FB
1344static void tb_reset_jump_recursive(TranslationBlock *tb);
1345
1346static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1347{
1348 TranslationBlock *tb1, *tb_next, **ptb;
1349 unsigned int n1;
1350
1351 tb1 = tb->jmp_next[n];
1352 if (tb1 != NULL) {
1353 /* find head of list */
1354 for(;;) {
1355 n1 = (long)tb1 & 3;
1356 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1357 if (n1 == 2)
1358 break;
1359 tb1 = tb1->jmp_next[n1];
1360 }
1361 /* we are now sure now that tb jumps to tb1 */
1362 tb_next = tb1;
1363
1364 /* remove tb from the jmp_first list */
1365 ptb = &tb_next->jmp_first;
1366 for(;;) {
1367 tb1 = *ptb;
1368 n1 = (long)tb1 & 3;
1369 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1370 if (n1 == n && tb1 == tb)
1371 break;
1372 ptb = &tb1->jmp_next[n1];
1373 }
1374 *ptb = tb->jmp_next[n];
1375 tb->jmp_next[n] = NULL;
3b46e624 1376
ea041c0e
FB
1377 /* suppress the jump to next tb in generated code */
1378 tb_reset_jump(tb, n);
1379
0124311e 1380 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1381 tb_reset_jump_recursive(tb_next);
1382 }
1383}
1384
1385static void tb_reset_jump_recursive(TranslationBlock *tb)
1386{
1387 tb_reset_jump_recursive2(tb, 0);
1388 tb_reset_jump_recursive2(tb, 1);
1389}
1390
1fddef4b 1391#if defined(TARGET_HAS_ICE)
94df27fd
PB
1392#if defined(CONFIG_USER_ONLY)
1393static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1394{
1395 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1396}
1397#else
d720b93d
FB
1398static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1399{
c227f099 1400 target_phys_addr_t addr;
9b3c35e0 1401 target_ulong pd;
c227f099 1402 ram_addr_t ram_addr;
c2f07f81 1403 PhysPageDesc *p;
d720b93d 1404
c2f07f81
PB
1405 addr = cpu_get_phys_page_debug(env, pc);
1406 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1407 if (!p) {
1408 pd = IO_MEM_UNASSIGNED;
1409 } else {
1410 pd = p->phys_offset;
1411 }
1412 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1413 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1414}
c27004ec 1415#endif
94df27fd 1416#endif /* TARGET_HAS_ICE */
d720b93d 1417
c527ee8f
PB
1418#if defined(CONFIG_USER_ONLY)
1419void cpu_watchpoint_remove_all(CPUState *env, int mask)
1420
1421{
1422}
1423
1424int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1425 int flags, CPUWatchpoint **watchpoint)
1426{
1427 return -ENOSYS;
1428}
1429#else
6658ffb8 1430/* Add a watchpoint. */
a1d1bb31
AL
1431int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1432 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1433{
b4051334 1434 target_ulong len_mask = ~(len - 1);
c0ce998e 1435 CPUWatchpoint *wp;
6658ffb8 1436
b4051334
AL
1437 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1438 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1439 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1440 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1441 return -EINVAL;
1442 }
7267c094 1443 wp = g_malloc(sizeof(*wp));
a1d1bb31
AL
1444
1445 wp->vaddr = addr;
b4051334 1446 wp->len_mask = len_mask;
a1d1bb31
AL
1447 wp->flags = flags;
1448
2dc9f411 1449 /* keep all GDB-injected watchpoints in front */
c0ce998e 1450 if (flags & BP_GDB)
72cf2d4f 1451 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1452 else
72cf2d4f 1453 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1454
6658ffb8 1455 tlb_flush_page(env, addr);
a1d1bb31
AL
1456
1457 if (watchpoint)
1458 *watchpoint = wp;
1459 return 0;
6658ffb8
PB
1460}
1461
a1d1bb31
AL
1462/* Remove a specific watchpoint. */
1463int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1464 int flags)
6658ffb8 1465{
b4051334 1466 target_ulong len_mask = ~(len - 1);
a1d1bb31 1467 CPUWatchpoint *wp;
6658ffb8 1468
72cf2d4f 1469 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1470 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1471 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1472 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1473 return 0;
1474 }
1475 }
a1d1bb31 1476 return -ENOENT;
6658ffb8
PB
1477}
1478
a1d1bb31
AL
1479/* Remove a specific watchpoint by reference. */
1480void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1481{
72cf2d4f 1482 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1483
a1d1bb31
AL
1484 tlb_flush_page(env, watchpoint->vaddr);
1485
7267c094 1486 g_free(watchpoint);
a1d1bb31
AL
1487}
1488
1489/* Remove all matching watchpoints. */
1490void cpu_watchpoint_remove_all(CPUState *env, int mask)
1491{
c0ce998e 1492 CPUWatchpoint *wp, *next;
a1d1bb31 1493
72cf2d4f 1494 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1495 if (wp->flags & mask)
1496 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1497 }
7d03f82f 1498}
c527ee8f 1499#endif
7d03f82f 1500
a1d1bb31
AL
1501/* Add a breakpoint. */
1502int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1503 CPUBreakpoint **breakpoint)
4c3a88a2 1504{
1fddef4b 1505#if defined(TARGET_HAS_ICE)
c0ce998e 1506 CPUBreakpoint *bp;
3b46e624 1507
7267c094 1508 bp = g_malloc(sizeof(*bp));
4c3a88a2 1509
a1d1bb31
AL
1510 bp->pc = pc;
1511 bp->flags = flags;
1512
2dc9f411 1513 /* keep all GDB-injected breakpoints in front */
c0ce998e 1514 if (flags & BP_GDB)
72cf2d4f 1515 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1516 else
72cf2d4f 1517 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1518
d720b93d 1519 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1520
1521 if (breakpoint)
1522 *breakpoint = bp;
4c3a88a2
FB
1523 return 0;
1524#else
a1d1bb31 1525 return -ENOSYS;
4c3a88a2
FB
1526#endif
1527}
1528
a1d1bb31
AL
1529/* Remove a specific breakpoint. */
1530int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1531{
7d03f82f 1532#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1533 CPUBreakpoint *bp;
1534
72cf2d4f 1535 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1536 if (bp->pc == pc && bp->flags == flags) {
1537 cpu_breakpoint_remove_by_ref(env, bp);
1538 return 0;
1539 }
7d03f82f 1540 }
a1d1bb31
AL
1541 return -ENOENT;
1542#else
1543 return -ENOSYS;
7d03f82f
EI
1544#endif
1545}
1546
a1d1bb31
AL
1547/* Remove a specific breakpoint by reference. */
1548void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1549{
1fddef4b 1550#if defined(TARGET_HAS_ICE)
72cf2d4f 1551 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1552
a1d1bb31
AL
1553 breakpoint_invalidate(env, breakpoint->pc);
1554
7267c094 1555 g_free(breakpoint);
a1d1bb31
AL
1556#endif
1557}
1558
1559/* Remove all matching breakpoints. */
1560void cpu_breakpoint_remove_all(CPUState *env, int mask)
1561{
1562#if defined(TARGET_HAS_ICE)
c0ce998e 1563 CPUBreakpoint *bp, *next;
a1d1bb31 1564
72cf2d4f 1565 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1566 if (bp->flags & mask)
1567 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1568 }
4c3a88a2
FB
1569#endif
1570}
1571
c33a346e
FB
1572/* enable or disable single step mode. EXCP_DEBUG is returned by the
1573 CPU loop after each instruction */
1574void cpu_single_step(CPUState *env, int enabled)
1575{
1fddef4b 1576#if defined(TARGET_HAS_ICE)
c33a346e
FB
1577 if (env->singlestep_enabled != enabled) {
1578 env->singlestep_enabled = enabled;
e22a25c9
AL
1579 if (kvm_enabled())
1580 kvm_update_guest_debug(env, 0);
1581 else {
ccbb4d44 1582 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1583 /* XXX: only flush what is necessary */
1584 tb_flush(env);
1585 }
c33a346e
FB
1586 }
1587#endif
1588}
1589
34865134
FB
1590/* enable or disable low levels log */
1591void cpu_set_log(int log_flags)
1592{
1593 loglevel = log_flags;
1594 if (loglevel && !logfile) {
11fcfab4 1595 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1596 if (!logfile) {
1597 perror(logfilename);
1598 _exit(1);
1599 }
9fa3e853
FB
1600#if !defined(CONFIG_SOFTMMU)
1601 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1602 {
b55266b5 1603 static char logfile_buf[4096];
9fa3e853
FB
1604 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1605 }
bf65f53f
FN
1606#elif !defined(_WIN32)
1607 /* Win32 doesn't support line-buffering and requires size >= 2 */
34865134 1608 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1609#endif
e735b91c
PB
1610 log_append = 1;
1611 }
1612 if (!loglevel && logfile) {
1613 fclose(logfile);
1614 logfile = NULL;
34865134
FB
1615 }
1616}
1617
1618void cpu_set_log_filename(const char *filename)
1619{
1620 logfilename = strdup(filename);
e735b91c
PB
1621 if (logfile) {
1622 fclose(logfile);
1623 logfile = NULL;
1624 }
1625 cpu_set_log(loglevel);
34865134 1626}
c33a346e 1627
3098dba0 1628static void cpu_unlink_tb(CPUState *env)
ea041c0e 1629{
3098dba0
AJ
1630 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1631 problem and hope the cpu will stop of its own accord. For userspace
1632 emulation this often isn't actually as bad as it sounds. Often
1633 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1634 TranslationBlock *tb;
c227f099 1635 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1636
cab1b4bd 1637 spin_lock(&interrupt_lock);
3098dba0
AJ
1638 tb = env->current_tb;
1639 /* if the cpu is currently executing code, we must unlink it and
1640 all the potentially executing TB */
f76cfe56 1641 if (tb) {
3098dba0
AJ
1642 env->current_tb = NULL;
1643 tb_reset_jump_recursive(tb);
be214e6c 1644 }
cab1b4bd 1645 spin_unlock(&interrupt_lock);
3098dba0
AJ
1646}
1647
97ffbd8d 1648#ifndef CONFIG_USER_ONLY
3098dba0 1649/* mask must never be zero, except for A20 change call */
ec6959d0 1650static void tcg_handle_interrupt(CPUState *env, int mask)
3098dba0
AJ
1651{
1652 int old_mask;
be214e6c 1653
2e70f6ef 1654 old_mask = env->interrupt_request;
68a79315 1655 env->interrupt_request |= mask;
3098dba0 1656
8edac960
AL
1657 /*
1658 * If called from iothread context, wake the target cpu in
1659 * case its halted.
1660 */
b7680cb6 1661 if (!qemu_cpu_is_self(env)) {
8edac960
AL
1662 qemu_cpu_kick(env);
1663 return;
1664 }
8edac960 1665
2e70f6ef 1666 if (use_icount) {
266910c4 1667 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1668 if (!can_do_io(env)
be214e6c 1669 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1670 cpu_abort(env, "Raised interrupt while not in I/O function");
1671 }
2e70f6ef 1672 } else {
3098dba0 1673 cpu_unlink_tb(env);
ea041c0e
FB
1674 }
1675}
1676
ec6959d0
JK
1677CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1678
97ffbd8d
JK
1679#else /* CONFIG_USER_ONLY */
1680
1681void cpu_interrupt(CPUState *env, int mask)
1682{
1683 env->interrupt_request |= mask;
1684 cpu_unlink_tb(env);
1685}
1686#endif /* CONFIG_USER_ONLY */
1687
b54ad049
FB
1688void cpu_reset_interrupt(CPUState *env, int mask)
1689{
1690 env->interrupt_request &= ~mask;
1691}
1692
3098dba0
AJ
1693void cpu_exit(CPUState *env)
1694{
1695 env->exit_request = 1;
1696 cpu_unlink_tb(env);
1697}
1698
c7cd6a37 1699const CPULogItem cpu_log_items[] = {
5fafdf24 1700 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1701 "show generated host assembly code for each compiled TB" },
1702 { CPU_LOG_TB_IN_ASM, "in_asm",
1703 "show target assembly code for each compiled TB" },
5fafdf24 1704 { CPU_LOG_TB_OP, "op",
57fec1fe 1705 "show micro ops for each compiled TB" },
f193c797 1706 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1707 "show micro ops "
1708#ifdef TARGET_I386
1709 "before eflags optimization and "
f193c797 1710#endif
e01a1157 1711 "after liveness analysis" },
f193c797
FB
1712 { CPU_LOG_INT, "int",
1713 "show interrupts/exceptions in short format" },
1714 { CPU_LOG_EXEC, "exec",
1715 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1716 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1717 "show CPU state before block translation" },
f193c797
FB
1718#ifdef TARGET_I386
1719 { CPU_LOG_PCALL, "pcall",
1720 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1721 { CPU_LOG_RESET, "cpu_reset",
1722 "show CPU state before CPU resets" },
f193c797 1723#endif
8e3a9fd2 1724#ifdef DEBUG_IOPORT
fd872598
FB
1725 { CPU_LOG_IOPORT, "ioport",
1726 "show all i/o ports accesses" },
8e3a9fd2 1727#endif
f193c797
FB
1728 { 0, NULL, NULL },
1729};
1730
f6f3fbca
MT
1731#ifndef CONFIG_USER_ONLY
1732static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1733 = QLIST_HEAD_INITIALIZER(memory_client_list);
1734
1735static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1736 ram_addr_t size,
0fd542fb
MT
1737 ram_addr_t phys_offset,
1738 bool log_dirty)
f6f3fbca
MT
1739{
1740 CPUPhysMemoryClient *client;
1741 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1742 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1743 }
1744}
1745
1746static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1747 target_phys_addr_t end)
f6f3fbca
MT
1748{
1749 CPUPhysMemoryClient *client;
1750 QLIST_FOREACH(client, &memory_client_list, list) {
1751 int r = client->sync_dirty_bitmap(client, start, end);
1752 if (r < 0)
1753 return r;
1754 }
1755 return 0;
1756}
1757
1758static int cpu_notify_migration_log(int enable)
1759{
1760 CPUPhysMemoryClient *client;
1761 QLIST_FOREACH(client, &memory_client_list, list) {
1762 int r = client->migration_log(client, enable);
1763 if (r < 0)
1764 return r;
1765 }
1766 return 0;
1767}
1768
2173a75f
AW
1769struct last_map {
1770 target_phys_addr_t start_addr;
1771 ram_addr_t size;
1772 ram_addr_t phys_offset;
1773};
1774
8d4c78e7
AW
1775/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1776 * address. Each intermediate table provides the next L2_BITs of guest
 1777 * physical address space. The number of levels varies based on host and
1778 * guest configuration, making it efficient to build the final guest
1779 * physical address by seeding the L1 offset and shifting and adding in
1780 * each L2 offset as we recurse through them. */
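/* For example, with one intermediate level the leaf loop below yields
 *   start_addr = (((l1_idx << L2_BITS) | mid_idx) << (L2_BITS + TARGET_PAGE_BITS))
 *                | (leaf_idx << TARGET_PAGE_BITS)
 * i.e. each recursion step shifts in L2_BITS of index, and the final shift
 * leaves room for the last index plus the page offset. */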
2173a75f
AW
1781static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1782 void **lp, target_phys_addr_t addr,
1783 struct last_map *map)
f6f3fbca 1784{
5cd2c5b6 1785 int i;
f6f3fbca 1786
5cd2c5b6
RH
1787 if (*lp == NULL) {
1788 return;
1789 }
1790 if (level == 0) {
1791 PhysPageDesc *pd = *lp;
8d4c78e7 1792 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1793 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1794 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1795 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1796
1797 if (map->size &&
1798 start_addr == map->start_addr + map->size &&
1799 pd[i].phys_offset == map->phys_offset + map->size) {
1800
1801 map->size += TARGET_PAGE_SIZE;
1802 continue;
1803 } else if (map->size) {
1804 client->set_memory(client, map->start_addr,
1805 map->size, map->phys_offset, false);
1806 }
1807
1808 map->start_addr = start_addr;
1809 map->size = TARGET_PAGE_SIZE;
1810 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1811 }
5cd2c5b6
RH
1812 }
1813 } else {
1814 void **pp = *lp;
7296abac 1815 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1816 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1817 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1818 }
1819 }
1820}
1821
1822static void phys_page_for_each(CPUPhysMemoryClient *client)
1823{
5cd2c5b6 1824 int i;
2173a75f
AW
1825 struct last_map map = { };
1826
5cd2c5b6
RH
1827 for (i = 0; i < P_L1_SIZE; ++i) {
1828 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1829 l1_phys_map + i, i, &map);
1830 }
1831 if (map.size) {
1832 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1833 false);
f6f3fbca 1834 }
f6f3fbca
MT
1835}
1836
1837void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1838{
1839 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1840 phys_page_for_each(client);
1841}
1842
1843void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1844{
1845 QLIST_REMOVE(client, list);
1846}
1847#endif
1848
f193c797
FB
1849static int cmp1(const char *s1, int n, const char *s2)
1850{
1851 if (strlen(s2) != n)
1852 return 0;
1853 return memcmp(s1, s2, n) == 0;
1854}
3b46e624 1855
f193c797
FB
1856/* takes a comma separated list of log masks. Return 0 if error. */
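/* e.g. "in_asm,cpu" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while "all"
   selects every item; an unrecognised name makes the call return 0. */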
1857int cpu_str_to_log_mask(const char *str)
1858{
c7cd6a37 1859 const CPULogItem *item;
f193c797
FB
1860 int mask;
1861 const char *p, *p1;
1862
1863 p = str;
1864 mask = 0;
1865 for(;;) {
1866 p1 = strchr(p, ',');
1867 if (!p1)
1868 p1 = p + strlen(p);
9742bf26
YT
1869 if(cmp1(p,p1-p,"all")) {
1870 for(item = cpu_log_items; item->mask != 0; item++) {
1871 mask |= item->mask;
1872 }
1873 } else {
1874 for(item = cpu_log_items; item->mask != 0; item++) {
1875 if (cmp1(p, p1 - p, item->name))
1876 goto found;
1877 }
1878 return 0;
f193c797 1879 }
f193c797
FB
1880 found:
1881 mask |= item->mask;
1882 if (*p1 != ',')
1883 break;
1884 p = p1 + 1;
1885 }
1886 return mask;
1887}
ea041c0e 1888
7501267e
FB
1889void cpu_abort(CPUState *env, const char *fmt, ...)
1890{
1891 va_list ap;
493ae1f0 1892 va_list ap2;
7501267e
FB
1893
1894 va_start(ap, fmt);
493ae1f0 1895 va_copy(ap2, ap);
7501267e
FB
1896 fprintf(stderr, "qemu: fatal: ");
1897 vfprintf(stderr, fmt, ap);
1898 fprintf(stderr, "\n");
1899#ifdef TARGET_I386
7fe48483
FB
1900 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1901#else
1902 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1903#endif
93fcfe39
AL
1904 if (qemu_log_enabled()) {
1905 qemu_log("qemu: fatal: ");
1906 qemu_log_vprintf(fmt, ap2);
1907 qemu_log("\n");
f9373291 1908#ifdef TARGET_I386
93fcfe39 1909 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1910#else
93fcfe39 1911 log_cpu_state(env, 0);
f9373291 1912#endif
31b1a7b4 1913 qemu_log_flush();
93fcfe39 1914 qemu_log_close();
924edcae 1915 }
493ae1f0 1916 va_end(ap2);
f9373291 1917 va_end(ap);
fd052bf6
RV
1918#if defined(CONFIG_USER_ONLY)
1919 {
1920 struct sigaction act;
1921 sigfillset(&act.sa_mask);
1922 act.sa_handler = SIG_DFL;
1923 sigaction(SIGABRT, &act, NULL);
1924 }
1925#endif
7501267e
FB
1926 abort();
1927}
1928
c5be9f08
TS
1929CPUState *cpu_copy(CPUState *env)
1930{
01ba9816 1931 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1932 CPUState *next_cpu = new_env->next_cpu;
1933 int cpu_index = new_env->cpu_index;
5a38f081
AL
1934#if defined(TARGET_HAS_ICE)
1935 CPUBreakpoint *bp;
1936 CPUWatchpoint *wp;
1937#endif
1938
c5be9f08 1939 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1940
1941 /* Preserve chaining and index. */
c5be9f08
TS
1942 new_env->next_cpu = next_cpu;
1943 new_env->cpu_index = cpu_index;
5a38f081
AL
1944
1945 /* Clone all break/watchpoints.
1946 Note: Once we support ptrace with hw-debug register access, make sure
1947 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1948 QTAILQ_INIT(&env->breakpoints);
1949 QTAILQ_INIT(&env->watchpoints);
5a38f081 1950#if defined(TARGET_HAS_ICE)
72cf2d4f 1951 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1952 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1953 }
72cf2d4f 1954 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1955 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1956 wp->flags, NULL);
1957 }
1958#endif
1959
c5be9f08
TS
1960 return new_env;
1961}
1962
0124311e
FB
1963#if !defined(CONFIG_USER_ONLY)
1964
5c751e99
EI
1965static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1966{
1967 unsigned int i;
1968
1969 /* Discard jump cache entries for any tb which might potentially
1970 overlap the flushed page. */
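    /* Entries hashed from the page preceding 'addr' are cleared as well,
       since a TB starting there may extend into the flushed page. */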
1971 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1972 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1973 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1974
1975 i = tb_jmp_cache_hash_page(addr);
1976 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1977 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1978}
1979
08738984
IK
1980static CPUTLBEntry s_cputlb_empty_entry = {
1981 .addr_read = -1,
1982 .addr_write = -1,
1983 .addr_code = -1,
1984 .addend = -1,
1985};
1986
ee8b7021
FB
1987/* NOTE: if flush_global is true, also flush global entries (not
1988 implemented yet) */
1989void tlb_flush(CPUState *env, int flush_global)
33417e70 1990{
33417e70 1991 int i;
0124311e 1992
9fa3e853
FB
1993#if defined(DEBUG_TLB)
1994 printf("tlb_flush:\n");
1995#endif
0124311e
FB
1996 /* must reset current TB so that interrupts cannot modify the
1997 links while we are modifying them */
1998 env->current_tb = NULL;
1999
33417e70 2000 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
2001 int mmu_idx;
2002 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 2003 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 2004 }
33417e70 2005 }
9fa3e853 2006
8a40a180 2007 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 2008
d4c430a8
PB
2009 env->tlb_flush_addr = -1;
2010 env->tlb_flush_mask = 0;
e3db7226 2011 tlb_flush_count++;
33417e70
FB
2012}
2013
274da6b2 2014static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2015{
5fafdf24 2016 if (addr == (tlb_entry->addr_read &
84b7b8e7 2017 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2018 addr == (tlb_entry->addr_write &
84b7b8e7 2019 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2020 addr == (tlb_entry->addr_code &
84b7b8e7 2021 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2022 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2023 }
61382a50
FB
2024}
2025
2e12669a 2026void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2027{
8a40a180 2028 int i;
cfde4bd9 2029 int mmu_idx;
0124311e 2030
9fa3e853 2031#if defined(DEBUG_TLB)
108c49b8 2032 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2033#endif
d4c430a8
PB
2034 /* Check if we need to flush due to large pages. */
2035 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2036#if defined(DEBUG_TLB)
2037 printf("tlb_flush_page: forced full flush ("
2038 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2039 env->tlb_flush_addr, env->tlb_flush_mask);
2040#endif
2041 tlb_flush(env, 1);
2042 return;
2043 }
0124311e
FB
2044 /* must reset current TB so that interrupts cannot modify the
2045 links while we are modifying them */
2046 env->current_tb = NULL;
61382a50
FB
2047
2048 addr &= TARGET_PAGE_MASK;
2049 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2050 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2052
5c751e99 2053 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2054}
2055
9fa3e853
FB
2056/* update the TLBs so that writes to code in the virtual page 'addr'
2057 can be detected */
c227f099 2058static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2059{
5fafdf24 2060 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2061 ram_addr + TARGET_PAGE_SIZE,
2062 CODE_DIRTY_FLAG);
9fa3e853
FB
2063}
2064
9fa3e853 2065/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2066 tested for self modifying code */
c227f099 2067static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2068 target_ulong vaddr)
9fa3e853 2069{
f7c11b53 2070 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2071}
2072
5fafdf24 2073static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2074 unsigned long start, unsigned long length)
2075{
2076 unsigned long addr;
84b7b8e7
FB
2077 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2078 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2079 if ((addr - start) < length) {
0f459d16 2080 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2081 }
2082 }
2083}
2084
5579c7f3 2085/* Note: start and end must be within the same ram block. */
c227f099 2086void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2087 int dirty_flags)
1ccde1cb
FB
2088{
2089 CPUState *env;
4f2ac237 2090 unsigned long length, start1;
f7c11b53 2091 int i;
1ccde1cb
FB
2092
2093 start &= TARGET_PAGE_MASK;
2094 end = TARGET_PAGE_ALIGN(end);
2095
2096 length = end - start;
2097 if (length == 0)
2098 return;
f7c11b53 2099 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2100
1ccde1cb
FB
2101 /* we modify the TLB cache so that the dirty bit will be set again
2102 when accessing the range */
b2e0a138 2103 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2104 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2105 address comparisons below. */
b2e0a138 2106 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2107 != (end - 1) - start) {
2108 abort();
2109 }
2110
6a00d601 2111 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2112 int mmu_idx;
2113 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2114 for(i = 0; i < CPU_TLB_SIZE; i++)
2115 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2116 start1, length);
2117 }
6a00d601 2118 }
1ccde1cb
FB
2119}
2120
74576198
AL
2121int cpu_physical_memory_set_dirty_tracking(int enable)
2122{
f6f3fbca 2123 int ret = 0;
74576198 2124 in_migration = enable;
f6f3fbca
MT
2125 ret = cpu_notify_migration_log(!!enable);
2126 return ret;
74576198
AL
2127}
2128
2129int cpu_physical_memory_get_dirty_tracking(void)
2130{
2131 return in_migration;
2132}
2133
c227f099
AL
2134int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2135 target_phys_addr_t end_addr)
2bec46dc 2136{
7b8f3b78 2137 int ret;
151f7749 2138
f6f3fbca 2139 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2140 return ret;
2bec46dc
AL
2141}
2142
e5896b12
AP
2143int cpu_physical_log_start(target_phys_addr_t start_addr,
2144 ram_addr_t size)
2145{
2146 CPUPhysMemoryClient *client;
2147 QLIST_FOREACH(client, &memory_client_list, list) {
2148 if (client->log_start) {
2149 int r = client->log_start(client, start_addr, size);
2150 if (r < 0) {
2151 return r;
2152 }
2153 }
2154 }
2155 return 0;
2156}
2157
2158int cpu_physical_log_stop(target_phys_addr_t start_addr,
2159 ram_addr_t size)
2160{
2161 CPUPhysMemoryClient *client;
2162 QLIST_FOREACH(client, &memory_client_list, list) {
2163 if (client->log_stop) {
2164 int r = client->log_stop(client, start_addr, size);
2165 if (r < 0) {
2166 return r;
2167 }
2168 }
2169 }
2170 return 0;
2171}
2172
3a7d929e
FB
2173static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2174{
c227f099 2175 ram_addr_t ram_addr;
5579c7f3 2176 void *p;
3a7d929e 2177
84b7b8e7 2178 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2179 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2180 + tlb_entry->addend);
e890261f 2181 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2182 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2183 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2184 }
2185 }
2186}
2187
2188/* update the TLB according to the current state of the dirty bits */
2189void cpu_tlb_update_dirty(CPUState *env)
2190{
2191 int i;
cfde4bd9
IY
2192 int mmu_idx;
2193 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2194 for(i = 0; i < CPU_TLB_SIZE; i++)
2195 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2196 }
3a7d929e
FB
2197}
2198
0f459d16 2199static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2200{
0f459d16
PB
2201 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2202 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2203}
2204
0f459d16
PB
2205/* update the TLB corresponding to virtual page vaddr
2206 so that it is no longer dirty */
2207static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2208{
1ccde1cb 2209 int i;
cfde4bd9 2210 int mmu_idx;
1ccde1cb 2211
0f459d16 2212 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2213 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2214 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2215 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2216}
2217
d4c430a8
PB
2218/* Our TLB does not support large pages, so remember the area covered by
2219 large pages and trigger a full TLB flush if these are invalidated. */
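/* Example: with an 8KB page already recorded at 0x4000 (tlb_flush_addr
   0x4000, tlb_flush_mask ~0x1fff), adding a 4KB page at 0x7000 widens the
   mask to ~0x3fff, so the tracked region becomes 0x4000..0x7fff and
   tlb_flush_page() on any address in it degenerates to a full flush. */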
2220static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2221 target_ulong size)
2222{
2223 target_ulong mask = ~(size - 1);
2224
2225 if (env->tlb_flush_addr == (target_ulong)-1) {
2226 env->tlb_flush_addr = vaddr & mask;
2227 env->tlb_flush_mask = mask;
2228 return;
2229 }
2230 /* Extend the existing region to include the new page.
2231 This is a compromise between unnecessary flushes and the cost
2232 of maintaining a full variable size TLB. */
2233 mask &= env->tlb_flush_mask;
2234 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2235 mask <<= 1;
2236 }
2237 env->tlb_flush_addr &= mask;
2238 env->tlb_flush_mask = mask;
2239}
2240
2241/* Add a new TLB entry. At most one entry for a given virtual address
 2242 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2243 supplied size is only used by tlb_flush_page. */
2244void tlb_set_page(CPUState *env, target_ulong vaddr,
2245 target_phys_addr_t paddr, int prot,
2246 int mmu_idx, target_ulong size)
9fa3e853 2247{
92e873b9 2248 PhysPageDesc *p;
4f2ac237 2249 unsigned long pd;
9fa3e853 2250 unsigned int index;
4f2ac237 2251 target_ulong address;
0f459d16 2252 target_ulong code_address;
355b1943 2253 unsigned long addend;
84b7b8e7 2254 CPUTLBEntry *te;
a1d1bb31 2255 CPUWatchpoint *wp;
c227f099 2256 target_phys_addr_t iotlb;
9fa3e853 2257
d4c430a8
PB
2258 assert(size >= TARGET_PAGE_SIZE);
2259 if (size != TARGET_PAGE_SIZE) {
2260 tlb_add_large_page(env, vaddr, size);
2261 }
92e873b9 2262 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2263 if (!p) {
2264 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2265 } else {
2266 pd = p->phys_offset;
9fa3e853
FB
2267 }
2268#if defined(DEBUG_TLB)
7fd3f494
SW
2269 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2270 " prot=%x idx=%d pd=0x%08lx\n",
2271 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2272#endif
2273
0f459d16
PB
2274 address = vaddr;
2275 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2276 /* IO memory case (romd handled later) */
2277 address |= TLB_MMIO;
2278 }
5579c7f3 2279 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2280 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2281 /* Normal RAM. */
2282 iotlb = pd & TARGET_PAGE_MASK;
2283 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2284 iotlb |= IO_MEM_NOTDIRTY;
2285 else
2286 iotlb |= IO_MEM_ROM;
2287 } else {
ccbb4d44 2288 /* IO handlers are currently passed a physical address.
0f459d16
PB
2289 It would be nice to pass an offset from the base address
2290 of that region. This would avoid having to special case RAM,
2291 and avoid full address decoding in every device.
2292 We can't use the high bits of pd for this because
2293 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2294 iotlb = (pd & ~TARGET_PAGE_MASK);
2295 if (p) {
8da3ff18
PB
2296 iotlb += p->region_offset;
2297 } else {
2298 iotlb += paddr;
2299 }
0f459d16
PB
2300 }
2301
2302 code_address = address;
2303 /* Make accesses to pages with watchpoints go via the
2304 watchpoint trap routines. */
72cf2d4f 2305 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2306 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2307 /* Avoid trapping reads of pages with a write breakpoint. */
2308 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2309 iotlb = io_mem_watch + paddr;
2310 address |= TLB_MMIO;
2311 break;
2312 }
6658ffb8 2313 }
0f459d16 2314 }
d79acba4 2315
0f459d16
PB
2316 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2317 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2318 te = &env->tlb_table[mmu_idx][index];
2319 te->addend = addend - vaddr;
2320 if (prot & PAGE_READ) {
2321 te->addr_read = address;
2322 } else {
2323 te->addr_read = -1;
2324 }
5c751e99 2325
0f459d16
PB
2326 if (prot & PAGE_EXEC) {
2327 te->addr_code = code_address;
2328 } else {
2329 te->addr_code = -1;
2330 }
2331 if (prot & PAGE_WRITE) {
2332 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2333 (pd & IO_MEM_ROMD)) {
2334 /* Write access calls the I/O callback. */
2335 te->addr_write = address | TLB_MMIO;
2336 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2337 !cpu_physical_memory_is_dirty(pd)) {
2338 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2339 } else {
0f459d16 2340 te->addr_write = address;
9fa3e853 2341 }
0f459d16
PB
2342 } else {
2343 te->addr_write = -1;
9fa3e853 2344 }
9fa3e853
FB
2345}
2346
0124311e
FB
2347#else
2348
ee8b7021 2349void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2350{
2351}
2352
2e12669a 2353void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2354{
2355}
2356
edf8e2af
MW
2357/*
2358 * Walks guest process memory "regions" one by one
2359 * and calls callback function 'fn' for each region.
2360 */
5cd2c5b6
RH
2361
2362struct walk_memory_regions_data
2363{
2364 walk_memory_regions_fn fn;
2365 void *priv;
2366 unsigned long start;
2367 int prot;
2368};
2369
2370static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2371 abi_ulong end, int new_prot)
5cd2c5b6
RH
2372{
2373 if (data->start != -1ul) {
2374 int rc = data->fn(data->priv, data->start, end, data->prot);
2375 if (rc != 0) {
2376 return rc;
2377 }
2378 }
2379
2380 data->start = (new_prot ? end : -1ul);
2381 data->prot = new_prot;
2382
2383 return 0;
2384}
2385
2386static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2387 abi_ulong base, int level, void **lp)
5cd2c5b6 2388{
b480d9b7 2389 abi_ulong pa;
5cd2c5b6
RH
2390 int i, rc;
2391
2392 if (*lp == NULL) {
2393 return walk_memory_regions_end(data, base, 0);
2394 }
2395
2396 if (level == 0) {
2397 PageDesc *pd = *lp;
7296abac 2398 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2399 int prot = pd[i].flags;
2400
2401 pa = base | (i << TARGET_PAGE_BITS);
2402 if (prot != data->prot) {
2403 rc = walk_memory_regions_end(data, pa, prot);
2404 if (rc != 0) {
2405 return rc;
9fa3e853 2406 }
9fa3e853 2407 }
5cd2c5b6
RH
2408 }
2409 } else {
2410 void **pp = *lp;
7296abac 2411 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2412 pa = base | ((abi_ulong)i <<
2413 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2414 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2415 if (rc != 0) {
2416 return rc;
2417 }
2418 }
2419 }
2420
2421 return 0;
2422}
2423
2424int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2425{
2426 struct walk_memory_regions_data data;
2427 unsigned long i;
2428
2429 data.fn = fn;
2430 data.priv = priv;
2431 data.start = -1ul;
2432 data.prot = 0;
2433
2434 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2435 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2436 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2437 if (rc != 0) {
2438 return rc;
9fa3e853 2439 }
33417e70 2440 }
5cd2c5b6
RH
2441
2442 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2443}
2444
b480d9b7
PB
2445static int dump_region(void *priv, abi_ulong start,
2446 abi_ulong end, unsigned long prot)
edf8e2af
MW
2447{
2448 FILE *f = (FILE *)priv;
2449
b480d9b7
PB
2450 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2451 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2452 start, end, end - start,
2453 ((prot & PAGE_READ) ? 'r' : '-'),
2454 ((prot & PAGE_WRITE) ? 'w' : '-'),
2455 ((prot & PAGE_EXEC) ? 'x' : '-'));
2456
2457 return (0);
2458}
2459
2460/* dump memory mappings */
2461void page_dump(FILE *f)
2462{
2463 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2464 "start", "end", "size", "prot");
2465 walk_memory_regions(f, dump_region);
33417e70
FB
2466}
2467
53a5960a 2468int page_get_flags(target_ulong address)
33417e70 2469{
9fa3e853
FB
2470 PageDesc *p;
2471
2472 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2473 if (!p)
9fa3e853
FB
2474 return 0;
2475 return p->flags;
2476}
2477
376a7909
RH
2478/* Modify the flags of a page and invalidate the code if necessary.
2479 The flag PAGE_WRITE_ORG is positioned automatically depending
2480 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2481void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2482{
376a7909
RH
2483 target_ulong addr, len;
2484
2485 /* This function should never be called with addresses outside the
2486 guest address space. If this assert fires, it probably indicates
2487 a missing call to h2g_valid. */
b480d9b7
PB
2488#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2489 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2490#endif
2491 assert(start < end);
9fa3e853
FB
2492
2493 start = start & TARGET_PAGE_MASK;
2494 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2495
2496 if (flags & PAGE_WRITE) {
9fa3e853 2497 flags |= PAGE_WRITE_ORG;
376a7909
RH
2498 }
2499
2500 for (addr = start, len = end - start;
2501 len != 0;
2502 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2503 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2504
2505 /* If the write protection bit is set, then we invalidate
2506 the code inside. */
5fafdf24 2507 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2508 (flags & PAGE_WRITE) &&
2509 p->first_tb) {
d720b93d 2510 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2511 }
2512 p->flags = flags;
2513 }
33417e70
FB
2514}
2515
3d97b40b
TS
2516int page_check_range(target_ulong start, target_ulong len, int flags)
2517{
2518 PageDesc *p;
2519 target_ulong end;
2520 target_ulong addr;
2521
376a7909
RH
2522 /* This function should never be called with addresses outside the
2523 guest address space. If this assert fires, it probably indicates
2524 a missing call to h2g_valid. */
338e9e6c
BS
2525#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2526 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2527#endif
2528
3e0650a9
RH
2529 if (len == 0) {
2530 return 0;
2531 }
376a7909
RH
2532 if (start + len - 1 < start) {
2533 /* We've wrapped around. */
55f280c9 2534 return -1;
376a7909 2535 }
55f280c9 2536
3d97b40b
TS
 2537 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2538 start = start & TARGET_PAGE_MASK;
2539
376a7909
RH
2540 for (addr = start, len = end - start;
2541 len != 0;
2542 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2543 p = page_find(addr >> TARGET_PAGE_BITS);
2544 if( !p )
2545 return -1;
2546 if( !(p->flags & PAGE_VALID) )
2547 return -1;
2548
dae3270c 2549 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2550 return -1;
dae3270c
FB
2551 if (flags & PAGE_WRITE) {
2552 if (!(p->flags & PAGE_WRITE_ORG))
2553 return -1;
2554 /* unprotect the page if it was put read-only because it
2555 contains translated code */
2556 if (!(p->flags & PAGE_WRITE)) {
2557 if (!page_unprotect(addr, 0, NULL))
2558 return -1;
2559 }
2560 return 0;
2561 }
3d97b40b
TS
2562 }
2563 return 0;
2564}
2565
9fa3e853 2566/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2567 page. Return TRUE if the fault was successfully handled. */
53a5960a 2568int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2569{
45d679d6
AJ
2570 unsigned int prot;
2571 PageDesc *p;
53a5960a 2572 target_ulong host_start, host_end, addr;
9fa3e853 2573
c8a706fe
PB
2574 /* Technically this isn't safe inside a signal handler. However we
2575 know this only ever happens in a synchronous SEGV handler, so in
2576 practice it seems to be ok. */
2577 mmap_lock();
2578
45d679d6
AJ
2579 p = page_find(address >> TARGET_PAGE_BITS);
2580 if (!p) {
c8a706fe 2581 mmap_unlock();
9fa3e853 2582 return 0;
c8a706fe 2583 }
45d679d6 2584
9fa3e853
FB
2585 /* if the page was really writable, then we change its
2586 protection back to writable */
45d679d6
AJ
2587 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2588 host_start = address & qemu_host_page_mask;
2589 host_end = host_start + qemu_host_page_size;
2590
2591 prot = 0;
2592 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2593 p = page_find(addr >> TARGET_PAGE_BITS);
2594 p->flags |= PAGE_WRITE;
2595 prot |= p->flags;
2596
9fa3e853
FB
2597 /* and since the content will be modified, we must invalidate
2598 the corresponding translated code. */
45d679d6 2599 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2600#ifdef DEBUG_TB_CHECK
45d679d6 2601 tb_invalidate_check(addr);
9fa3e853 2602#endif
9fa3e853 2603 }
45d679d6
AJ
2604 mprotect((void *)g2h(host_start), qemu_host_page_size,
2605 prot & PAGE_BITS);
2606
2607 mmap_unlock();
2608 return 1;
9fa3e853 2609 }
c8a706fe 2610 mmap_unlock();
9fa3e853
FB
2611 return 0;
2612}
2613
6a00d601
FB
2614static inline void tlb_set_dirty(CPUState *env,
2615 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2616{
2617}
9fa3e853
FB
2618#endif /* defined(CONFIG_USER_ONLY) */
2619
e2eef170 2620#if !defined(CONFIG_USER_ONLY)
8da3ff18 2621
c04b2b78
PB
2622#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2623typedef struct subpage_t {
2624 target_phys_addr_t base;
f6405247
RH
2625 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2626 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2627} subpage_t;
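/* SUBPAGE_IDX() is the byte offset within the page; sub_io_index and
   region_offset are indexed per byte, so each byte of the page can be routed
   to a different I/O handler. */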
2628
c227f099
AL
2629static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2630 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2631static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2632 ram_addr_t orig_memory,
2633 ram_addr_t region_offset);
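/* CHECK_SUBPAGE computes, for the TARGET_PAGE_SIZE page containing 'addr',
   the first (start_addr2) and last (end_addr2) byte offsets of the part of
   [start_addr, start_addr + orig_size) that falls inside that page, and sets
   need_subpage when that part does not cover the whole page. */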
db7b5426
BS
2634#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2635 need_subpage) \
2636 do { \
2637 if (addr > start_addr) \
2638 start_addr2 = 0; \
2639 else { \
2640 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2641 if (start_addr2 > 0) \
2642 need_subpage = 1; \
2643 } \
2644 \
49e9fba2 2645 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2646 end_addr2 = TARGET_PAGE_SIZE - 1; \
2647 else { \
2648 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2649 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2650 need_subpage = 1; \
2651 } \
2652 } while (0)
2653
8f2498f9
MT
2654/* register physical memory.
2655 For RAM, 'size' must be a multiple of the target page size.
2656 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2657 io memory page. The address used when calling the IO function is
2658 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2659 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2660 before calculating this offset. This should not be a problem unless
2661 the low bits of start_addr and region_offset differ. */
0fd542fb 2662void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2663 ram_addr_t size,
2664 ram_addr_t phys_offset,
0fd542fb
MT
2665 ram_addr_t region_offset,
2666 bool log_dirty)
33417e70 2667{
c227f099 2668 target_phys_addr_t addr, end_addr;
92e873b9 2669 PhysPageDesc *p;
9d42037b 2670 CPUState *env;
c227f099 2671 ram_addr_t orig_size = size;
f6405247 2672 subpage_t *subpage;
33417e70 2673
3b8e6a2d 2674 assert(size);
0fd542fb 2675 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2676
67c4d23c
PB
2677 if (phys_offset == IO_MEM_UNASSIGNED) {
2678 region_offset = start_addr;
2679 }
8da3ff18 2680 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2681 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2682 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2683
2684 addr = start_addr;
2685 do {
db7b5426
BS
2686 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2687 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2688 ram_addr_t orig_memory = p->phys_offset;
2689 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2690 int need_subpage = 0;
2691
2692 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2693 need_subpage);
f6405247 2694 if (need_subpage) {
db7b5426
BS
2695 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2696 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2697 &p->phys_offset, orig_memory,
2698 p->region_offset);
db7b5426
BS
2699 } else {
2700 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2701 >> IO_MEM_SHIFT];
2702 }
8da3ff18
PB
2703 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2704 region_offset);
2705 p->region_offset = 0;
db7b5426
BS
2706 } else {
2707 p->phys_offset = phys_offset;
2708 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2709 (phys_offset & IO_MEM_ROMD))
2710 phys_offset += TARGET_PAGE_SIZE;
2711 }
2712 } else {
2713 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2714 p->phys_offset = phys_offset;
8da3ff18 2715 p->region_offset = region_offset;
db7b5426 2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2717 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2718 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2719 } else {
c227f099 2720 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2721 int need_subpage = 0;
2722
2723 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2724 end_addr2, need_subpage);
2725
f6405247 2726 if (need_subpage) {
db7b5426 2727 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2728 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2729 addr & TARGET_PAGE_MASK);
db7b5426 2730 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2731 phys_offset, region_offset);
2732 p->region_offset = 0;
db7b5426
BS
2733 }
2734 }
2735 }
8da3ff18 2736 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2737 addr += TARGET_PAGE_SIZE;
2738 } while (addr != end_addr);
3b46e624 2739
9d42037b
FB
2740 /* since each CPU stores ram addresses in its TLB cache, we must
2741 reset the modified entries */
2742 /* XXX: slow ! */
2743 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2744 tlb_flush(env, 1);
2745 }
33417e70
FB
2746}
2747
ba863458 2748/* XXX: temporary until new memory mapping API */
c227f099 2749ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2750{
2751 PhysPageDesc *p;
2752
2753 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754 if (!p)
2755 return IO_MEM_UNASSIGNED;
2756 return p->phys_offset;
2757}
2758
c227f099 2759void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2760{
2761 if (kvm_enabled())
2762 kvm_coalesce_mmio_region(addr, size);
2763}
2764
c227f099 2765void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2766{
2767 if (kvm_enabled())
2768 kvm_uncoalesce_mmio_region(addr, size);
2769}
2770
62a2744c
SY
2771void qemu_flush_coalesced_mmio_buffer(void)
2772{
2773 if (kvm_enabled())
2774 kvm_flush_coalesced_mmio_buffer();
2775}
2776
c902760f
MT
2777#if defined(__linux__) && !defined(TARGET_S390X)
2778
2779#include <sys/vfs.h>
2780
2781#define HUGETLBFS_MAGIC 0x958458f6
2782
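/* Returns the filesystem block size of 'path'; on hugetlbfs this is the huge
   page size. A warning is printed (but the size is still returned) if the
   path is not on hugetlbfs, and 0 is returned on statfs failure. */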
2783static long gethugepagesize(const char *path)
2784{
2785 struct statfs fs;
2786 int ret;
2787
2788 do {
9742bf26 2789 ret = statfs(path, &fs);
c902760f
MT
2790 } while (ret != 0 && errno == EINTR);
2791
2792 if (ret != 0) {
9742bf26
YT
2793 perror(path);
2794 return 0;
c902760f
MT
2795 }
2796
2797 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2798 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2799
2800 return fs.f_bsize;
2801}
2802
04b16653
AW
2803static void *file_ram_alloc(RAMBlock *block,
2804 ram_addr_t memory,
2805 const char *path)
c902760f
MT
2806{
2807 char *filename;
2808 void *area;
2809 int fd;
2810#ifdef MAP_POPULATE
2811 int flags;
2812#endif
2813 unsigned long hpagesize;
2814
2815 hpagesize = gethugepagesize(path);
2816 if (!hpagesize) {
9742bf26 2817 return NULL;
c902760f
MT
2818 }
2819
2820 if (memory < hpagesize) {
2821 return NULL;
2822 }
2823
2824 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2825 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2826 return NULL;
2827 }
2828
2829 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2830 return NULL;
c902760f
MT
2831 }
2832
2833 fd = mkstemp(filename);
2834 if (fd < 0) {
9742bf26
YT
2835 perror("unable to create backing store for hugepages");
2836 free(filename);
2837 return NULL;
c902760f
MT
2838 }
2839 unlink(filename);
2840 free(filename);
2841
2842 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2843
2844 /*
2845 * ftruncate is not supported by hugetlbfs in older
2846 * hosts, so don't bother bailing out on errors.
2847 * If anything goes wrong with it under other filesystems,
2848 * mmap will fail.
2849 */
2850 if (ftruncate(fd, memory))
9742bf26 2851 perror("ftruncate");
c902760f
MT
2852
2853#ifdef MAP_POPULATE
2854 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2855 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2856 * to sidestep this quirk.
2857 */
2858 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2859 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2860#else
2861 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2862#endif
2863 if (area == MAP_FAILED) {
9742bf26
YT
2864 perror("file_ram_alloc: can't mmap RAM pages");
2865 close(fd);
2866 return (NULL);
c902760f 2867 }
04b16653 2868 block->fd = fd;
c902760f
MT
2869 return area;
2870}
2871#endif
2872
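/* Best-fit search over the gaps between registered RAM blocks: return the
   end offset of the block whose following gap is the smallest one that still
   holds 'size' bytes; abort if no gap is large enough. */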
d17b5288 2873static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2874{
2875 RAMBlock *block, *next_block;
3e837b2c 2876 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
04b16653
AW
2877
2878 if (QLIST_EMPTY(&ram_list.blocks))
2879 return 0;
2880
2881 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2882 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2883
2884 end = block->offset + block->length;
2885
2886 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2887 if (next_block->offset >= end) {
2888 next = MIN(next, next_block->offset);
2889 }
2890 }
2891 if (next - end >= size && next - end < mingap) {
3e837b2c 2892 offset = end;
04b16653
AW
2893 mingap = next - end;
2894 }
2895 }
3e837b2c
AW
2896
2897 if (offset == RAM_ADDR_MAX) {
2898 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2899 (uint64_t)size);
2900 abort();
2901 }
2902
04b16653
AW
2903 return offset;
2904}
2905
2906static ram_addr_t last_ram_offset(void)
d17b5288
AW
2907{
2908 RAMBlock *block;
2909 ram_addr_t last = 0;
2910
2911 QLIST_FOREACH(block, &ram_list.blocks, next)
2912 last = MAX(last, block->offset + block->length);
2913
2914 return last;
2915}
2916
84b89d78 2917ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2918 ram_addr_t size, void *host)
84b89d78
CM
2919{
2920 RAMBlock *new_block, *block;
2921
2922 size = TARGET_PAGE_ALIGN(size);
7267c094 2923 new_block = g_malloc0(sizeof(*new_block));
84b89d78
CM
2924
2925 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2926 char *id = dev->parent_bus->info->get_dev_path(dev);
2927 if (id) {
2928 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
7267c094 2929 g_free(id);
84b89d78
CM
2930 }
2931 }
2932 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2933
2934 QLIST_FOREACH(block, &ram_list.blocks, next) {
2935 if (!strcmp(block->idstr, new_block->idstr)) {
2936 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2937 new_block->idstr);
2938 abort();
2939 }
2940 }
2941
432d268c 2942 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2943 if (host) {
2944 new_block->host = host;
cd19cfa2 2945 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2946 } else {
2947 if (mem_path) {
c902760f 2948#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2949 new_block->host = file_ram_alloc(new_block, size, mem_path);
2950 if (!new_block->host) {
2951 new_block->host = qemu_vmalloc(size);
e78815a5 2952 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2953 }
c902760f 2954#else
6977dfe6
YT
2955 fprintf(stderr, "-mem-path option unsupported\n");
2956 exit(1);
c902760f 2957#endif
6977dfe6 2958 } else {
6b02494d 2959#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2960 /* S390 KVM requires the topmost vma of the RAM to be smaller than
 2961 a system-defined value, which is at least 256GB. Larger systems
 2962 have larger values. We put the guest between the end of the data
 2963 segment (system break) and this value. We use 32GB as a base to
2964 have enough room for the system break to grow. */
2965 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2966 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2967 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2968 if (new_block->host == MAP_FAILED) {
2969 fprintf(stderr, "Allocating RAM failed\n");
2970 abort();
2971 }
6b02494d 2972#else
868bb33f 2973 if (xen_enabled()) {
432d268c
JN
2974 xen_ram_alloc(new_block->offset, size);
2975 } else {
2976 new_block->host = qemu_vmalloc(size);
2977 }
6b02494d 2978#endif
e78815a5 2979 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2980 }
c902760f 2981 }
94a6b54f
PB
2982 new_block->length = size;
2983
f471a17e 2984 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2985
7267c094 2986 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
04b16653 2987 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2988 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2989 0xff, size >> TARGET_PAGE_BITS);
2990
6f0437e8
JK
2991 if (kvm_enabled())
2992 kvm_setup_guest_memory(new_block->host, size);
2993
94a6b54f
PB
2994 return new_block->offset;
2995}
e9a1ab19 2996
6977dfe6
YT
2997ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2998{
2999 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
3000}
3001
1f2e98b6
AW
3002void qemu_ram_free_from_ptr(ram_addr_t addr)
3003{
3004 RAMBlock *block;
3005
3006 QLIST_FOREACH(block, &ram_list.blocks, next) {
3007 if (addr == block->offset) {
3008 QLIST_REMOVE(block, next);
7267c094 3009 g_free(block);
1f2e98b6
AW
3010 return;
3011 }
3012 }
3013}
3014
c227f099 3015void qemu_ram_free(ram_addr_t addr)
e9a1ab19 3016{
04b16653
AW
3017 RAMBlock *block;
3018
3019 QLIST_FOREACH(block, &ram_list.blocks, next) {
3020 if (addr == block->offset) {
3021 QLIST_REMOVE(block, next);
cd19cfa2
HY
3022 if (block->flags & RAM_PREALLOC_MASK) {
3023 ;
3024 } else if (mem_path) {
04b16653
AW
3025#if defined (__linux__) && !defined(TARGET_S390X)
3026 if (block->fd) {
3027 munmap(block->host, block->length);
3028 close(block->fd);
3029 } else {
3030 qemu_vfree(block->host);
3031 }
fd28aa13
JK
3032#else
3033 abort();
04b16653
AW
3034#endif
3035 } else {
3036#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3037 munmap(block->host, block->length);
3038#else
868bb33f 3039 if (xen_enabled()) {
e41d7c69 3040 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3041 } else {
3042 qemu_vfree(block->host);
3043 }
04b16653
AW
3044#endif
3045 }
7267c094 3046 g_free(block);
04b16653
AW
3047 return;
3048 }
3049 }
3050
e9a1ab19
FB
3051}
3052
cd19cfa2
HY
3053#ifndef _WIN32
3054void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3055{
3056 RAMBlock *block;
3057 ram_addr_t offset;
3058 int flags;
3059 void *area, *vaddr;
3060
3061 QLIST_FOREACH(block, &ram_list.blocks, next) {
3062 offset = addr - block->offset;
3063 if (offset < block->length) {
3064 vaddr = block->host + offset;
3065 if (block->flags & RAM_PREALLOC_MASK) {
3066 ;
3067 } else {
3068 flags = MAP_FIXED;
3069 munmap(vaddr, length);
3070 if (mem_path) {
3071#if defined(__linux__) && !defined(TARGET_S390X)
3072 if (block->fd) {
3073#ifdef MAP_POPULATE
3074 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3075 MAP_PRIVATE;
3076#else
3077 flags |= MAP_PRIVATE;
3078#endif
3079 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3080 flags, block->fd, offset);
3081 } else {
3082 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3083 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3084 flags, -1, 0);
3085 }
fd28aa13
JK
3086#else
3087 abort();
cd19cfa2
HY
3088#endif
3089 } else {
3090#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3091 flags |= MAP_SHARED | MAP_ANONYMOUS;
3092 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3093 flags, -1, 0);
3094#else
3095 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3096 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3097 flags, -1, 0);
3098#endif
3099 }
3100 if (area != vaddr) {
f15fbc4b
AP
3101 fprintf(stderr, "Could not remap addr: "
3102 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3103 length, addr);
3104 exit(1);
3105 }
3106 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3107 }
3108 return;
3109 }
3110 }
3111}
3112#endif /* !_WIN32 */
3113
dc828ca1 3114/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3115 With the exception of the softmmu code in this file, this should
3116 only be used for local memory (e.g. video ram) that the device owns,
3117 and knows it isn't going to access beyond the end of the block.
3118
3119 It should not be used for general purpose DMA.
3120 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3121 */
c227f099 3122void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3123{
94a6b54f
PB
3124 RAMBlock *block;
3125
f471a17e
AW
3126 QLIST_FOREACH(block, &ram_list.blocks, next) {
3127 if (addr - block->offset < block->length) {
7d82af38
VP
 3128 /* Move this entry to the start of the list. */
3129 if (block != QLIST_FIRST(&ram_list.blocks)) {
3130 QLIST_REMOVE(block, next);
3131 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3132 }
868bb33f 3133 if (xen_enabled()) {
432d268c
JN
3134 /* We need to check if the requested address is in the RAM
3135 * because we don't want to map the entire memory in QEMU.
712c2b41 3136 * In that case just map until the end of the page.
432d268c
JN
3137 */
3138 if (block->offset == 0) {
e41d7c69 3139 return xen_map_cache(addr, 0, 0);
432d268c 3140 } else if (block->host == NULL) {
e41d7c69
JK
3141 block->host =
3142 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3143 }
3144 }
f471a17e
AW
3145 return block->host + (addr - block->offset);
3146 }
94a6b54f 3147 }
f471a17e
AW
3148
3149 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3150 abort();
3151
3152 return NULL;
dc828ca1
PB
3153}
3154
b2e0a138
MT
3155/* Return a host pointer to ram allocated with qemu_ram_alloc.
3156 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3157 */
3158void *qemu_safe_ram_ptr(ram_addr_t addr)
3159{
3160 RAMBlock *block;
3161
3162 QLIST_FOREACH(block, &ram_list.blocks, next) {
3163 if (addr - block->offset < block->length) {
868bb33f 3164 if (xen_enabled()) {
432d268c
JN
3165 /* We need to check if the requested address is in the RAM
3166 * because we don't want to map the entire memory in QEMU.
712c2b41 3167 * In that case just map until the end of the page.
432d268c
JN
3168 */
3169 if (block->offset == 0) {
e41d7c69 3170 return xen_map_cache(addr, 0, 0);
432d268c 3171 } else if (block->host == NULL) {
e41d7c69
JK
3172 block->host =
3173 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3174 }
3175 }
b2e0a138
MT
3176 return block->host + (addr - block->offset);
3177 }
3178 }
3179
3180 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3181 abort();
3182
3183 return NULL;
3184}
3185
38bee5dc
SS
3186/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3187 * but takes a size argument */
8ab934f9 3188void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3189{
8ab934f9
SS
3190 if (*size == 0) {
3191 return NULL;
3192 }
868bb33f 3193 if (xen_enabled()) {
e41d7c69 3194 return xen_map_cache(addr, *size, 1);
868bb33f 3195 } else {
38bee5dc
SS
3196 RAMBlock *block;
3197
3198 QLIST_FOREACH(block, &ram_list.blocks, next) {
3199 if (addr - block->offset < block->length) {
3200 if (addr - block->offset + *size > block->length)
3201 *size = block->length - addr + block->offset;
3202 return block->host + (addr - block->offset);
3203 }
3204 }
3205
3206 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3207 abort();
38bee5dc
SS
3208 }
3209}
3210
050a0ddf
AP
3211void qemu_put_ram_ptr(void *addr)
3212{
3213 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3214}
3215
e890261f 3216int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3217{
94a6b54f
PB
3218 RAMBlock *block;
3219 uint8_t *host = ptr;
3220
868bb33f 3221 if (xen_enabled()) {
e41d7c69 3222 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3223 return 0;
3224 }
3225
f471a17e 3226 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3227 /* This case occurs when the block is not mapped. */
3228 if (block->host == NULL) {
3229 continue;
3230 }
f471a17e 3231 if (host - block->host < block->length) {
e890261f
MT
3232 *ram_addr = block->offset + (host - block->host);
3233 return 0;
f471a17e 3234 }
94a6b54f 3235 }
432d268c 3236
e890261f
MT
3237 return -1;
3238}
f471a17e 3239
e890261f
MT
3240/* Some of the softmmu routines need to translate from a host pointer
3241 (typically a TLB entry) back to a ram offset. */
3242ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3243{
3244 ram_addr_t ram_addr;
f471a17e 3245
e890261f
MT
3246 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3247 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3248 abort();
3249 }
3250 return ram_addr;
5579c7f3
PB
3251}
3252
c227f099 3253static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3254{
67d3b957 3255#ifdef DEBUG_UNASSIGNED
ab3d1727 3256 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3257#endif
5b450407 3258#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3259 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3260#endif
3261 return 0;
3262}
3263
c227f099 3264static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3265{
3266#ifdef DEBUG_UNASSIGNED
3267 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3268#endif
5b450407 3269#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3270 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3271#endif
3272 return 0;
3273}
3274
c227f099 3275static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3276{
3277#ifdef DEBUG_UNASSIGNED
3278 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3279#endif
5b450407 3280#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3281 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3282#endif
33417e70
FB
3283 return 0;
3284}
3285
c227f099 3286static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3287{
67d3b957 3288#ifdef DEBUG_UNASSIGNED
ab3d1727 3289 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3290#endif
5b450407 3291#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3292 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3293#endif
3294}
3295
c227f099 3296static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3297{
3298#ifdef DEBUG_UNASSIGNED
3299 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3300#endif
5b450407 3301#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3302 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3303#endif
3304}
3305
c227f099 3306static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3307{
3308#ifdef DEBUG_UNASSIGNED
3309 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3310#endif
5b450407 3311#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3312 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3313#endif
33417e70
FB
3314}
3315
d60efc6b 3316static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3317 unassigned_mem_readb,
e18231a3
BS
3318 unassigned_mem_readw,
3319 unassigned_mem_readl,
33417e70
FB
3320};
3321
d60efc6b 3322static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3323 unassigned_mem_writeb,
e18231a3
BS
3324 unassigned_mem_writew,
3325 unassigned_mem_writel,
33417e70
FB
3326};
3327
c227f099 3328static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3329 uint32_t val)
9fa3e853 3330{
3a7d929e 3331 int dirty_flags;
f7c11b53 3332 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3333 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3334#if !defined(CONFIG_USER_ONLY)
3a7d929e 3335 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3336 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3337#endif
3a7d929e 3338 }
5579c7f3 3339 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3340 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3341 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3342 /* we remove the notdirty callback only if the code has been
3343 flushed */
3344 if (dirty_flags == 0xff)
2e70f6ef 3345 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3346}
3347
c227f099 3348static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3349 uint32_t val)
9fa3e853 3350{
3a7d929e 3351 int dirty_flags;
f7c11b53 3352 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3353 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3354#if !defined(CONFIG_USER_ONLY)
3a7d929e 3355 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3356 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3357#endif
3a7d929e 3358 }
5579c7f3 3359 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3360 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3361 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3362 /* we remove the notdirty callback only if the code has been
3363 flushed */
3364 if (dirty_flags == 0xff)
2e70f6ef 3365 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3366}
3367
c227f099 3368static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3369 uint32_t val)
9fa3e853 3370{
3a7d929e 3371 int dirty_flags;
f7c11b53 3372 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3373 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3374#if !defined(CONFIG_USER_ONLY)
3a7d929e 3375 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3376 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3377#endif
3a7d929e 3378 }
5579c7f3 3379 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3380 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3381 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3382 /* we remove the notdirty callback only if the code has been
3383 flushed */
3384 if (dirty_flags == 0xff)
2e70f6ef 3385 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3386}
3387
d60efc6b 3388static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3389 NULL, /* never used */
3390 NULL, /* never used */
3391 NULL, /* never used */
3392};
3393
d60efc6b 3394static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3395 notdirty_mem_writeb,
3396 notdirty_mem_writew,
3397 notdirty_mem_writel,
3398};
3399
0f459d16 3400/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3401static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3402{
3403 CPUState *env = cpu_single_env;
06d55cc1
AL
3404 target_ulong pc, cs_base;
3405 TranslationBlock *tb;
0f459d16 3406 target_ulong vaddr;
a1d1bb31 3407 CPUWatchpoint *wp;
06d55cc1 3408 int cpu_flags;
0f459d16 3409
06d55cc1
AL
3410 if (env->watchpoint_hit) {
3411 /* We re-entered the check after replacing the TB. Now raise
3412 * the debug interrupt so that is will trigger after the
3413 * current instruction. */
3414 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3415 return;
3416 }
2e70f6ef 3417 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3418 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
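        /* Overlap test for power-of-two aligned ranges: either the
           watchpoint address rounded down to the access size equals the
           accessed address, or the accessed address rounded down to the
           watchpoint length equals the watchpoint address. */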
b4051334
AL
3419 if ((vaddr == (wp->vaddr & len_mask) ||
3420 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3421 wp->flags |= BP_WATCHPOINT_HIT;
3422 if (!env->watchpoint_hit) {
3423 env->watchpoint_hit = wp;
3424 tb = tb_find_pc(env->mem_io_pc);
3425 if (!tb) {
3426 cpu_abort(env, "check_watchpoint: could not find TB for "
3427 "pc=%p", (void *)env->mem_io_pc);
3428 }
618ba8e6 3429 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3430 tb_phys_invalidate(tb, -1);
3431 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3432 env->exception_index = EXCP_DEBUG;
3433 } else {
3434 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3435 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3436 }
3437 cpu_resume_from_signal(env, NULL);
06d55cc1 3438 }
6e140f28
AL
3439 } else {
3440 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3441 }
3442 }
3443}
3444
6658ffb8
PB
3445/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3446 so these check for a hit then pass through to the normal out-of-line
3447 phys routines. */
c227f099 3448static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3449{
b4051334 3450 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3451 return ldub_phys(addr);
3452}
3453
c227f099 3454static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3455{
b4051334 3456 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3457 return lduw_phys(addr);
3458}
3459
c227f099 3460static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3461{
b4051334 3462 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3463 return ldl_phys(addr);
3464}
3465
c227f099 3466static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3467 uint32_t val)
3468{
b4051334 3469 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3470 stb_phys(addr, val);
3471}
3472
c227f099 3473static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3474 uint32_t val)
3475{
b4051334 3476 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3477 stw_phys(addr, val);
3478}
3479
c227f099 3480static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3481 uint32_t val)
3482{
b4051334 3483 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3484 stl_phys(addr, val);
3485}
3486
d60efc6b 3487static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3488 watch_mem_readb,
3489 watch_mem_readw,
3490 watch_mem_readl,
3491};
3492
d60efc6b 3493static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3494 watch_mem_writeb,
3495 watch_mem_writew,
3496 watch_mem_writel,
3497};
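
Editor's note: a minimal, hypothetical usage sketch (not part of exec.c) of the watchpoint machinery above. It assumes the cpu_watchpoint_insert()/cpu_watchpoint_remove_by_ref() helpers declared in cpu-all.h for this QEMU version; the function and variable names are illustrative only. Once a watchpoint covers a page, the TLB routes accesses through io_mem_watch, so the watch_mem_* handlers run check_watchpoint() before the real access.

static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    CPUWatchpoint *wp;

    /* break on any aligned 4-byte write to vaddr (hypothetical address) */
    if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
        fprintf(stderr, "could not insert watchpoint\n");
        return;
    }
    /* ... run the guest: a hit raises EXCP_DEBUG via check_watchpoint() ... */
    cpu_watchpoint_remove_by_ref(env, wp);
}
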
6658ffb8 3498
f6405247
RH
3499static inline uint32_t subpage_readlen (subpage_t *mmio,
3500 target_phys_addr_t addr,
3501 unsigned int len)
db7b5426 3502{
f6405247 3503 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3504#if defined(DEBUG_SUBPAGE)
3505 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3506 mmio, len, addr, idx);
3507#endif
db7b5426 3508
f6405247
RH
3509 addr += mmio->region_offset[idx];
3510 idx = mmio->sub_io_index[idx];
3511 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3512}
3513
c227f099 3514static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3515 uint32_t value, unsigned int len)
db7b5426 3516{
f6405247 3517 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3518#if defined(DEBUG_SUBPAGE)
f6405247
RH
3519 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3520 __func__, mmio, len, addr, idx, value);
db7b5426 3521#endif
f6405247
RH
3522
3523 addr += mmio->region_offset[idx];
3524 idx = mmio->sub_io_index[idx];
3525 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3526}
3527
c227f099 3528static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3529{
db7b5426
BS
3530 return subpage_readlen(opaque, addr, 0);
3531}
3532
c227f099 3533static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3534 uint32_t value)
3535{
db7b5426
BS
3536 subpage_writelen(opaque, addr, value, 0);
3537}
3538
c227f099 3539static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3540{
db7b5426
BS
3541 return subpage_readlen(opaque, addr, 1);
3542}
3543
c227f099 3544static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3545 uint32_t value)
3546{
db7b5426
BS
3547 subpage_writelen(opaque, addr, value, 1);
3548}
3549
c227f099 3550static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3551{
db7b5426
BS
3552 return subpage_readlen(opaque, addr, 2);
3553}
3554
f6405247
RH
3555static void subpage_writel (void *opaque, target_phys_addr_t addr,
3556 uint32_t value)
db7b5426 3557{
db7b5426
BS
3558 subpage_writelen(opaque, addr, value, 2);
3559}
3560
d60efc6b 3561static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3562 &subpage_readb,
3563 &subpage_readw,
3564 &subpage_readl,
3565};
3566
d60efc6b 3567static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3568 &subpage_writeb,
3569 &subpage_writew,
3570 &subpage_writel,
3571};
3572
2061800b
AF
3573static uint32_t subpage_ram_readb(void *opaque, target_phys_addr_t addr)
3574{
3575 ram_addr_t raddr = addr;
3576 void *ptr = qemu_get_ram_ptr(raddr);
3577 return ldub_p(ptr);
3578}
3579
3580static void subpage_ram_writeb(void *opaque, target_phys_addr_t addr,
3581 uint32_t value)
3582{
3583 ram_addr_t raddr = addr;
3584 void *ptr = qemu_get_ram_ptr(raddr);
3585 stb_p(ptr, value);
3586}
3587
3588static uint32_t subpage_ram_readw(void *opaque, target_phys_addr_t addr)
3589{
3590 ram_addr_t raddr = addr;
3591 void *ptr = qemu_get_ram_ptr(raddr);
3592 return lduw_p(ptr);
3593}
3594
3595static void subpage_ram_writew(void *opaque, target_phys_addr_t addr,
3596 uint32_t value)
3597{
3598 ram_addr_t raddr = addr;
3599 void *ptr = qemu_get_ram_ptr(raddr);
3600 stw_p(ptr, value);
3601}
3602
3603static uint32_t subpage_ram_readl(void *opaque, target_phys_addr_t addr)
3604{
3605 ram_addr_t raddr = addr;
3606 void *ptr = qemu_get_ram_ptr(raddr);
3607 return ldl_p(ptr);
3608}
3609
3610static void subpage_ram_writel(void *opaque, target_phys_addr_t addr,
3611 uint32_t value)
3612{
3613 ram_addr_t raddr = addr;
3614 void *ptr = qemu_get_ram_ptr(raddr);
3615 stl_p(ptr, value);
3616}
3617
3618static CPUReadMemoryFunc * const subpage_ram_read[] = {
3619 &subpage_ram_readb,
3620 &subpage_ram_readw,
3621 &subpage_ram_readl,
3622};
3623
3624static CPUWriteMemoryFunc * const subpage_ram_write[] = {
3625 &subpage_ram_writeb,
3626 &subpage_ram_writew,
3627 &subpage_ram_writel,
3628};
3629
c227f099
AL
3630static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3631 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3632{
3633 int idx, eidx;
3634
3635 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3636 return -1;
3637 idx = SUBPAGE_IDX(start);
3638 eidx = SUBPAGE_IDX(end);
3639#if defined(DEBUG_SUBPAGE)
0bf9e31a 3640 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3641 mmio, start, end, idx, eidx, memory);
3642#endif
2061800b
AF
3643 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
3644 memory = IO_MEM_SUBPAGE_RAM;
3645 }
f6405247 3646 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3647 for (; idx <= eidx; idx++) {
f6405247
RH
3648 mmio->sub_io_index[idx] = memory;
3649 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3650 }
3651
3652 return 0;
3653}
3654
f6405247
RH
3655static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3656 ram_addr_t orig_memory,
3657 ram_addr_t region_offset)
db7b5426 3658{
c227f099 3659 subpage_t *mmio;
db7b5426
BS
3660 int subpage_memory;
3661
7267c094 3662 mmio = g_malloc0(sizeof(subpage_t));
1eec614b
AL
3663
3664 mmio->base = base;
2507c12a
AG
3665 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3666 DEVICE_NATIVE_ENDIAN);
db7b5426 3667#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3668 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3669 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3670#endif
1eec614b 3671 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3672 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3673
3674 return mmio;
3675}
3676
88715657
AL
3677static int get_free_io_mem_idx(void)
3678{
3679 int i;
3680
3681 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3682 if (!io_mem_used[i]) {
3683 io_mem_used[i] = 1;
3684 return i;
3685 }
c6703b47 3686 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3687 return -1;
3688}
3689
dd310534
AG
3690/*
 3691 * Usually, devices operate in little-endian mode. There are devices out
 3692 * there that operate in big-endian mode too. Such a device gets byte-swapped
 3693 * mmio if it is plugged onto a CPU of the opposite endianness.
3694 *
3695 * CPU Device swap?
3696 *
3697 * little little no
3698 * little big yes
3699 * big little yes
3700 * big big no
3701 */
3702
3703typedef struct SwapEndianContainer {
3704 CPUReadMemoryFunc *read[3];
3705 CPUWriteMemoryFunc *write[3];
3706 void *opaque;
3707} SwapEndianContainer;
3708
3709static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3710{
3711 uint32_t val;
3712 SwapEndianContainer *c = opaque;
3713 val = c->read[0](c->opaque, addr);
3714 return val;
3715}
3716
3717static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3718{
3719 uint32_t val;
3720 SwapEndianContainer *c = opaque;
3721 val = bswap16(c->read[1](c->opaque, addr));
3722 return val;
3723}
3724
3725static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3726{
3727 uint32_t val;
3728 SwapEndianContainer *c = opaque;
3729 val = bswap32(c->read[2](c->opaque, addr));
3730 return val;
3731}
3732
3733static CPUReadMemoryFunc * const swapendian_readfn[3]={
3734 swapendian_mem_readb,
3735 swapendian_mem_readw,
3736 swapendian_mem_readl
3737};
3738
3739static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3740 uint32_t val)
3741{
3742 SwapEndianContainer *c = opaque;
3743 c->write[0](c->opaque, addr, val);
3744}
3745
3746static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3747 uint32_t val)
3748{
3749 SwapEndianContainer *c = opaque;
3750 c->write[1](c->opaque, addr, bswap16(val));
3751}
3752
3753static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3754 uint32_t val)
3755{
3756 SwapEndianContainer *c = opaque;
3757 c->write[2](c->opaque, addr, bswap32(val));
3758}
3759
3760static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3761 swapendian_mem_writeb,
3762 swapendian_mem_writew,
3763 swapendian_mem_writel
3764};
3765
3766static void swapendian_init(int io_index)
3767{
7267c094 3768 SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
dd310534
AG
3769 int i;
3770
3771 /* Swap mmio for big endian targets */
3772 c->opaque = io_mem_opaque[io_index];
3773 for (i = 0; i < 3; i++) {
3774 c->read[i] = io_mem_read[io_index][i];
3775 c->write[i] = io_mem_write[io_index][i];
3776
3777 io_mem_read[io_index][i] = swapendian_readfn[i];
3778 io_mem_write[io_index][i] = swapendian_writefn[i];
3779 }
3780 io_mem_opaque[io_index] = c;
3781}
3782
3783static void swapendian_del(int io_index)
3784{
3785 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
7267c094 3786 g_free(io_mem_opaque[io_index]);
dd310534
AG
3787 }
3788}
3789
33417e70
FB
3790/* mem_read and mem_write are arrays of functions containing the
3791 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3792 2). Functions can be omitted with a NULL function pointer.
3ee89922 3793 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3794 modified. If it is zero, a new io zone is allocated. The return
3795 value can be used with cpu_register_physical_memory(). (-1) is
 3796 returned on error. */
1eed09cb 3797static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3798 CPUReadMemoryFunc * const *mem_read,
3799 CPUWriteMemoryFunc * const *mem_write,
dd310534 3800 void *opaque, enum device_endian endian)
33417e70 3801{
3cab721d
RH
3802 int i;
3803
33417e70 3804 if (io_index <= 0) {
88715657
AL
3805 io_index = get_free_io_mem_idx();
3806 if (io_index == -1)
3807 return io_index;
33417e70 3808 } else {
1eed09cb 3809 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3810 if (io_index >= IO_MEM_NB_ENTRIES)
3811 return -1;
3812 }
b5ff1b31 3813
3cab721d
RH
3814 for (i = 0; i < 3; ++i) {
3815 io_mem_read[io_index][i]
3816 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3817 }
3818 for (i = 0; i < 3; ++i) {
3819 io_mem_write[io_index][i]
3820 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3821 }
a4193c8a 3822 io_mem_opaque[io_index] = opaque;
f6405247 3823
dd310534
AG
3824 switch (endian) {
3825 case DEVICE_BIG_ENDIAN:
3826#ifndef TARGET_WORDS_BIGENDIAN
3827 swapendian_init(io_index);
3828#endif
3829 break;
3830 case DEVICE_LITTLE_ENDIAN:
3831#ifdef TARGET_WORDS_BIGENDIAN
3832 swapendian_init(io_index);
3833#endif
3834 break;
3835 case DEVICE_NATIVE_ENDIAN:
3836 default:
3837 break;
3838 }
3839
f6405247 3840 return (io_index << IO_MEM_SHIFT);
33417e70 3841}
61382a50 3842
d60efc6b
BS
3843int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3844 CPUWriteMemoryFunc * const *mem_write,
dd310534 3845 void *opaque, enum device_endian endian)
1eed09cb 3846{
2507c12a 3847 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3848}
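
Editor's note: a hypothetical registration sketch (not part of exec.c) showing how a device would use cpu_register_io_memory() above. The ExampleState struct, callbacks, and base address are illustrative; the sketch also assumes the cpu_register_physical_memory() helper from cpu-common.h to map the returned io index into the guest physical address space.

typedef struct ExampleState {
    uint32_t reg;                          /* one 32-bit register at offset 0 */
} ExampleState;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleState *s = opaque;
    return s->reg;
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleState *s = opaque;
    s->reg = val;
}

/* NULL entries fall back to the unassigned_mem_* handlers */
static CPUReadMemoryFunc * const example_read[3] = { NULL, NULL, example_readl };
static CPUWriteMemoryFunc * const example_write[3] = { NULL, NULL, example_writel };

static void example_register(ExampleState *s, target_phys_addr_t base)
{
    /* DEVICE_LITTLE_ENDIAN makes cpu_register_io_memory_fixed() interpose
       the swapendian wrappers above when the target CPU is big endian */
    int io = cpu_register_io_memory(example_read, example_write, s,
                                    DEVICE_LITTLE_ENDIAN);
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}
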
3849
88715657
AL
3850void cpu_unregister_io_memory(int io_table_address)
3851{
3852 int i;
3853 int io_index = io_table_address >> IO_MEM_SHIFT;
3854
dd310534
AG
3855 swapendian_del(io_index);
3856
88715657
AL
3857 for (i=0;i < 3; i++) {
3858 io_mem_read[io_index][i] = unassigned_mem_read[i];
3859 io_mem_write[io_index][i] = unassigned_mem_write[i];
3860 }
3861 io_mem_opaque[io_index] = NULL;
3862 io_mem_used[io_index] = 0;
3863}
3864
e9179ce1
AK
3865static void io_mem_init(void)
3866{
3867 int i;
3868
2507c12a
AG
3869 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3870 unassigned_mem_write, NULL,
3871 DEVICE_NATIVE_ENDIAN);
3872 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3873 unassigned_mem_write, NULL,
3874 DEVICE_NATIVE_ENDIAN);
3875 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3876 notdirty_mem_write, NULL,
3877 DEVICE_NATIVE_ENDIAN);
2061800b
AF
3878 cpu_register_io_memory_fixed(IO_MEM_SUBPAGE_RAM, subpage_ram_read,
3879 subpage_ram_write, NULL,
3880 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3881 for (i=0; i<5; i++)
3882 io_mem_used[i] = 1;
3883
3884 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3885 watch_mem_write, NULL,
3886 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3887}
3888
62152b8a
AK
3889static void memory_map_init(void)
3890{
7267c094 3891 system_memory = g_malloc(sizeof(*system_memory));
8417cebf 3892 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3893 set_system_memory_map(system_memory);
309cb471 3894
7267c094 3895 system_io = g_malloc(sizeof(*system_io));
309cb471
AK
3896 memory_region_init(system_io, "io", 65536);
3897 set_system_io_map(system_io);
62152b8a
AK
3898}
3899
3900MemoryRegion *get_system_memory(void)
3901{
3902 return system_memory;
3903}
3904
309cb471
AK
3905MemoryRegion *get_system_io(void)
3906{
3907 return system_io;
3908}
3909
e2eef170
PB
3910#endif /* !defined(CONFIG_USER_ONLY) */
3911
13eb76e0
FB
3912/* physical memory access (slow version, mainly for debug) */
3913#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3914int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3915 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3916{
3917 int l, flags;
3918 target_ulong page;
53a5960a 3919 void * p;
13eb76e0
FB
3920
3921 while (len > 0) {
3922 page = addr & TARGET_PAGE_MASK;
3923 l = (page + TARGET_PAGE_SIZE) - addr;
3924 if (l > len)
3925 l = len;
3926 flags = page_get_flags(page);
3927 if (!(flags & PAGE_VALID))
a68fe89c 3928 return -1;
13eb76e0
FB
3929 if (is_write) {
3930 if (!(flags & PAGE_WRITE))
a68fe89c 3931 return -1;
579a97f7 3932 /* XXX: this code should not depend on lock_user */
72fb7daa 3933 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3934 return -1;
72fb7daa
AJ
3935 memcpy(p, buf, l);
3936 unlock_user(p, addr, l);
13eb76e0
FB
3937 } else {
3938 if (!(flags & PAGE_READ))
a68fe89c 3939 return -1;
579a97f7 3940 /* XXX: this code should not depend on lock_user */
72fb7daa 3941 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3942 return -1;
72fb7daa 3943 memcpy(buf, p, l);
5b257578 3944 unlock_user(p, addr, 0);
13eb76e0
FB
3945 }
3946 len -= l;
3947 buf += l;
3948 addr += l;
3949 }
a68fe89c 3950 return 0;
13eb76e0 3951}
8df1cd07 3952
13eb76e0 3953#else
c227f099 3954void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3955 int len, int is_write)
3956{
3957 int l, io_index;
3958 uint8_t *ptr;
3959 uint32_t val;
c227f099 3960 target_phys_addr_t page;
8ca5692d 3961 ram_addr_t pd;
92e873b9 3962 PhysPageDesc *p;
3b46e624 3963
13eb76e0
FB
3964 while (len > 0) {
3965 page = addr & TARGET_PAGE_MASK;
3966 l = (page + TARGET_PAGE_SIZE) - addr;
3967 if (l > len)
3968 l = len;
92e873b9 3969 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3970 if (!p) {
3971 pd = IO_MEM_UNASSIGNED;
3972 } else {
3973 pd = p->phys_offset;
3974 }
3b46e624 3975
13eb76e0 3976 if (is_write) {
3a7d929e 3977 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3978 target_phys_addr_t addr1 = addr;
13eb76e0 3979 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3980 if (p)
6c2934db 3981 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3982 /* XXX: could force cpu_single_env to NULL to avoid
3983 potential bugs */
6c2934db 3984 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3985 /* 32 bit write access */
c27004ec 3986 val = ldl_p(buf);
6c2934db 3987 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3988 l = 4;
6c2934db 3989 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3990 /* 16 bit write access */
c27004ec 3991 val = lduw_p(buf);
6c2934db 3992 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3993 l = 2;
3994 } else {
1c213d19 3995 /* 8 bit write access */
c27004ec 3996 val = ldub_p(buf);
6c2934db 3997 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3998 l = 1;
3999 }
4000 } else {
8ca5692d 4001 ram_addr_t addr1;
b448f2f3 4002 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 4003 /* RAM case */
5579c7f3 4004 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 4005 memcpy(ptr, buf, l);
3a7d929e
FB
4006 if (!cpu_physical_memory_is_dirty(addr1)) {
4007 /* invalidate code */
4008 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4009 /* set dirty bit */
f7c11b53
YT
4010 cpu_physical_memory_set_dirty_flags(
4011 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4012 }
050a0ddf 4013 qemu_put_ram_ptr(ptr);
13eb76e0
FB
4014 }
4015 } else {
5fafdf24 4016 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4017 !(pd & IO_MEM_ROMD)) {
c227f099 4018 target_phys_addr_t addr1 = addr;
13eb76e0
FB
4019 /* I/O case */
4020 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 4021 if (p)
6c2934db
AJ
4022 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4023 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 4024 /* 32 bit read access */
6c2934db 4025 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 4026 stl_p(buf, val);
13eb76e0 4027 l = 4;
6c2934db 4028 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 4029 /* 16 bit read access */
6c2934db 4030 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 4031 stw_p(buf, val);
13eb76e0
FB
4032 l = 2;
4033 } else {
1c213d19 4034 /* 8 bit read access */
6c2934db 4035 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 4036 stb_p(buf, val);
13eb76e0
FB
4037 l = 1;
4038 }
4039 } else {
4040 /* RAM case */
050a0ddf
AP
4041 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
4042 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
4043 qemu_put_ram_ptr(ptr);
13eb76e0
FB
4044 }
4045 }
4046 len -= l;
4047 buf += l;
4048 addr += l;
4049 }
4050}
8df1cd07 4051
d0ecd2aa 4052/* used for ROM loading: can write in RAM and ROM */
c227f099 4053void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
4054 const uint8_t *buf, int len)
4055{
4056 int l;
4057 uint8_t *ptr;
c227f099 4058 target_phys_addr_t page;
d0ecd2aa
FB
4059 unsigned long pd;
4060 PhysPageDesc *p;
3b46e624 4061
d0ecd2aa
FB
4062 while (len > 0) {
4063 page = addr & TARGET_PAGE_MASK;
4064 l = (page + TARGET_PAGE_SIZE) - addr;
4065 if (l > len)
4066 l = len;
4067 p = phys_page_find(page >> TARGET_PAGE_BITS);
4068 if (!p) {
4069 pd = IO_MEM_UNASSIGNED;
4070 } else {
4071 pd = p->phys_offset;
4072 }
3b46e624 4073
d0ecd2aa 4074 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
4075 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4076 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
4077 /* do nothing */
4078 } else {
4079 unsigned long addr1;
4080 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4081 /* ROM/RAM case */
5579c7f3 4082 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 4083 memcpy(ptr, buf, l);
050a0ddf 4084 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
4085 }
4086 len -= l;
4087 buf += l;
4088 addr += l;
4089 }
4090}
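
Editor's note: a short, hypothetical sketch (not part of exec.c) of why a firmware loader calls cpu_physical_memory_write_rom() above instead of cpu_physical_memory_rw(): the ordinary write path silently ignores ROM pages. The blob and load address are illustrative.

static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_size)
{
    /* writes through to both RAM- and ROM-backed pages; MMIO is skipped */
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
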
4091
6d16c2f8
AL
4092typedef struct {
4093 void *buffer;
c227f099
AL
4094 target_phys_addr_t addr;
4095 target_phys_addr_t len;
6d16c2f8
AL
4096} BounceBuffer;
4097
4098static BounceBuffer bounce;
4099
ba223c29
AL
4100typedef struct MapClient {
4101 void *opaque;
4102 void (*callback)(void *opaque);
72cf2d4f 4103 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4104} MapClient;
4105
72cf2d4f
BS
4106static QLIST_HEAD(map_client_list, MapClient) map_client_list
4107 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4108
4109void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4110{
7267c094 4111 MapClient *client = g_malloc(sizeof(*client));
ba223c29
AL
4112
4113 client->opaque = opaque;
4114 client->callback = callback;
72cf2d4f 4115 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4116 return client;
4117}
4118
4119void cpu_unregister_map_client(void *_client)
4120{
4121 MapClient *client = (MapClient *)_client;
4122
72cf2d4f 4123 QLIST_REMOVE(client, link);
7267c094 4124 g_free(client);
ba223c29
AL
4125}
4126
4127static void cpu_notify_map_clients(void)
4128{
4129 MapClient *client;
4130
72cf2d4f
BS
4131 while (!QLIST_EMPTY(&map_client_list)) {
4132 client = QLIST_FIRST(&map_client_list);
ba223c29 4133 client->callback(client->opaque);
34d5e948 4134 cpu_unregister_map_client(client);
ba223c29
AL
4135 }
4136}
4137
6d16c2f8
AL
4138/* Map a physical memory region into a host virtual address.
4139 * May map a subset of the requested range, given by and returned in *plen.
4140 * May return NULL if resources needed to perform the mapping are exhausted.
4141 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4142 * Use cpu_register_map_client() to know when retrying the map operation is
4143 * likely to succeed.
6d16c2f8 4144 */
c227f099
AL
4145void *cpu_physical_memory_map(target_phys_addr_t addr,
4146 target_phys_addr_t *plen,
6d16c2f8
AL
4147 int is_write)
4148{
c227f099 4149 target_phys_addr_t len = *plen;
38bee5dc 4150 target_phys_addr_t todo = 0;
6d16c2f8 4151 int l;
c227f099 4152 target_phys_addr_t page;
6d16c2f8
AL
4153 unsigned long pd;
4154 PhysPageDesc *p;
f15fbc4b 4155 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4156 ram_addr_t rlen;
4157 void *ret;
6d16c2f8
AL
4158
4159 while (len > 0) {
4160 page = addr & TARGET_PAGE_MASK;
4161 l = (page + TARGET_PAGE_SIZE) - addr;
4162 if (l > len)
4163 l = len;
4164 p = phys_page_find(page >> TARGET_PAGE_BITS);
4165 if (!p) {
4166 pd = IO_MEM_UNASSIGNED;
4167 } else {
4168 pd = p->phys_offset;
4169 }
4170
4171 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4172 if (todo || bounce.buffer) {
6d16c2f8
AL
4173 break;
4174 }
4175 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4176 bounce.addr = addr;
4177 bounce.len = l;
4178 if (!is_write) {
54f7b4a3 4179 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4180 }
38bee5dc
SS
4181
4182 *plen = l;
4183 return bounce.buffer;
6d16c2f8 4184 }
8ab934f9
SS
4185 if (!todo) {
4186 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4187 }
6d16c2f8
AL
4188
4189 len -= l;
4190 addr += l;
38bee5dc 4191 todo += l;
6d16c2f8 4192 }
8ab934f9
SS
4193 rlen = todo;
4194 ret = qemu_ram_ptr_length(raddr, &rlen);
4195 *plen = rlen;
4196 return ret;
6d16c2f8
AL
4197}
4198
4199/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4200 * Will also mark the memory as dirty if is_write == 1. access_len gives
4201 * the amount of memory that was actually read or written by the caller.
4202 */
c227f099
AL
4203void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4204 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4205{
4206 if (buffer != bounce.buffer) {
4207 if (is_write) {
e890261f 4208 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4209 while (access_len) {
4210 unsigned l;
4211 l = TARGET_PAGE_SIZE;
4212 if (l > access_len)
4213 l = access_len;
4214 if (!cpu_physical_memory_is_dirty(addr1)) {
4215 /* invalidate code */
4216 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4217 /* set dirty bit */
f7c11b53
YT
4218 cpu_physical_memory_set_dirty_flags(
4219 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4220 }
4221 addr1 += l;
4222 access_len -= l;
4223 }
4224 }
868bb33f 4225 if (xen_enabled()) {
e41d7c69 4226 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4227 }
6d16c2f8
AL
4228 return;
4229 }
4230 if (is_write) {
4231 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4232 }
f8a83245 4233 qemu_vfree(bounce.buffer);
6d16c2f8 4234 bounce.buffer = NULL;
ba223c29 4235 cpu_notify_map_clients();
6d16c2f8 4236}
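
Editor's note: a hypothetical calling-pattern sketch (not part of exec.c) for the cpu_physical_memory_map()/cpu_physical_memory_unmap() pair above, as DMA-style code would use it. The function name, addresses and lengths are illustrative; a real caller should also register a map client via cpu_register_map_client() and retry when NULL is returned.

static void example_dma_write(target_phys_addr_t dma_addr,
                              const uint8_t *data, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);
        if (!host) {
            /* mapping resources exhausted (e.g. the bounce buffer is busy);
               real code would register a map client and retry later */
            return;
        }
        memcpy(host, data, plen);
        /* access_len == plen marks the written range dirty and invalidates
           any translated code in it */
        cpu_physical_memory_unmap(host, plen, 1, plen);
        dma_addr += plen;
        data += plen;
        len -= plen;
    }
}
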
d0ecd2aa 4237
8df1cd07 4238/* warning: addr must be aligned */
1e78bcc1
AG
4239static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4240 enum device_endian endian)
8df1cd07
FB
4241{
4242 int io_index;
4243 uint8_t *ptr;
4244 uint32_t val;
4245 unsigned long pd;
4246 PhysPageDesc *p;
4247
4248 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4249 if (!p) {
4250 pd = IO_MEM_UNASSIGNED;
4251 } else {
4252 pd = p->phys_offset;
4253 }
3b46e624 4254
5fafdf24 4255 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4256 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4257 /* I/O case */
4258 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4259 if (p)
4260 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4261 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4262#if defined(TARGET_WORDS_BIGENDIAN)
4263 if (endian == DEVICE_LITTLE_ENDIAN) {
4264 val = bswap32(val);
4265 }
4266#else
4267 if (endian == DEVICE_BIG_ENDIAN) {
4268 val = bswap32(val);
4269 }
4270#endif
8df1cd07
FB
4271 } else {
4272 /* RAM case */
5579c7f3 4273 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4274 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4275 switch (endian) {
4276 case DEVICE_LITTLE_ENDIAN:
4277 val = ldl_le_p(ptr);
4278 break;
4279 case DEVICE_BIG_ENDIAN:
4280 val = ldl_be_p(ptr);
4281 break;
4282 default:
4283 val = ldl_p(ptr);
4284 break;
4285 }
8df1cd07
FB
4286 }
4287 return val;
4288}
4289
1e78bcc1
AG
4290uint32_t ldl_phys(target_phys_addr_t addr)
4291{
4292 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4293}
4294
4295uint32_t ldl_le_phys(target_phys_addr_t addr)
4296{
4297 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4298}
4299
4300uint32_t ldl_be_phys(target_phys_addr_t addr)
4301{
4302 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4303}
4304
84b7b8e7 4305/* warning: addr must be aligned */
1e78bcc1
AG
4306static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4307 enum device_endian endian)
84b7b8e7
FB
4308{
4309 int io_index;
4310 uint8_t *ptr;
4311 uint64_t val;
4312 unsigned long pd;
4313 PhysPageDesc *p;
4314
4315 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4316 if (!p) {
4317 pd = IO_MEM_UNASSIGNED;
4318 } else {
4319 pd = p->phys_offset;
4320 }
3b46e624 4321
2a4188a3
FB
4322 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4323 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4324 /* I/O case */
4325 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4326 if (p)
4327 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4328
4329 /* XXX This is broken when device endian != cpu endian.
4330 Fix and add "endian" variable check */
84b7b8e7
FB
4331#ifdef TARGET_WORDS_BIGENDIAN
4332 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4333 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4334#else
4335 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4336 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4337#endif
4338 } else {
4339 /* RAM case */
5579c7f3 4340 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4341 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4342 switch (endian) {
4343 case DEVICE_LITTLE_ENDIAN:
4344 val = ldq_le_p(ptr);
4345 break;
4346 case DEVICE_BIG_ENDIAN:
4347 val = ldq_be_p(ptr);
4348 break;
4349 default:
4350 val = ldq_p(ptr);
4351 break;
4352 }
84b7b8e7
FB
4353 }
4354 return val;
4355}
4356
1e78bcc1
AG
4357uint64_t ldq_phys(target_phys_addr_t addr)
4358{
4359 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4360}
4361
4362uint64_t ldq_le_phys(target_phys_addr_t addr)
4363{
4364 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4365}
4366
4367uint64_t ldq_be_phys(target_phys_addr_t addr)
4368{
4369 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4370}
4371
aab33094 4372/* XXX: optimize */
c227f099 4373uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4374{
4375 uint8_t val;
4376 cpu_physical_memory_read(addr, &val, 1);
4377 return val;
4378}
4379
733f0b02 4380/* warning: addr must be aligned */
1e78bcc1
AG
4381static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4382 enum device_endian endian)
aab33094 4383{
733f0b02
MT
4384 int io_index;
4385 uint8_t *ptr;
4386 uint64_t val;
4387 unsigned long pd;
4388 PhysPageDesc *p;
4389
4390 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4391 if (!p) {
4392 pd = IO_MEM_UNASSIGNED;
4393 } else {
4394 pd = p->phys_offset;
4395 }
4396
4397 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4398 !(pd & IO_MEM_ROMD)) {
4399 /* I/O case */
4400 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4401 if (p)
4402 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4403 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4404#if defined(TARGET_WORDS_BIGENDIAN)
4405 if (endian == DEVICE_LITTLE_ENDIAN) {
4406 val = bswap16(val);
4407 }
4408#else
4409 if (endian == DEVICE_BIG_ENDIAN) {
4410 val = bswap16(val);
4411 }
4412#endif
733f0b02
MT
4413 } else {
4414 /* RAM case */
4415 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4416 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4417 switch (endian) {
4418 case DEVICE_LITTLE_ENDIAN:
4419 val = lduw_le_p(ptr);
4420 break;
4421 case DEVICE_BIG_ENDIAN:
4422 val = lduw_be_p(ptr);
4423 break;
4424 default:
4425 val = lduw_p(ptr);
4426 break;
4427 }
733f0b02
MT
4428 }
4429 return val;
aab33094
FB
4430}
4431
1e78bcc1
AG
4432uint32_t lduw_phys(target_phys_addr_t addr)
4433{
4434 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4435}
4436
4437uint32_t lduw_le_phys(target_phys_addr_t addr)
4438{
4439 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4440}
4441
4442uint32_t lduw_be_phys(target_phys_addr_t addr)
4443{
4444 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4445}
4446
8df1cd07
FB
 4447/* warning: addr must be aligned. The ram page is not marked as dirty
4448 and the code inside is not invalidated. It is useful if the dirty
4449 bits are used to track modified PTEs */
c227f099 4450void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4451{
4452 int io_index;
4453 uint8_t *ptr;
4454 unsigned long pd;
4455 PhysPageDesc *p;
4456
4457 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4458 if (!p) {
4459 pd = IO_MEM_UNASSIGNED;
4460 } else {
4461 pd = p->phys_offset;
4462 }
3b46e624 4463
3a7d929e 4464 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4465 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4466 if (p)
4467 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4468 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4469 } else {
74576198 4470 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4471 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4472 stl_p(ptr, val);
74576198
AL
4473
4474 if (unlikely(in_migration)) {
4475 if (!cpu_physical_memory_is_dirty(addr1)) {
4476 /* invalidate code */
4477 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4478 /* set dirty bit */
f7c11b53
YT
4479 cpu_physical_memory_set_dirty_flags(
4480 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4481 }
4482 }
8df1cd07
FB
4483 }
4484}
4485
c227f099 4486void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4487{
4488 int io_index;
4489 uint8_t *ptr;
4490 unsigned long pd;
4491 PhysPageDesc *p;
4492
4493 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4494 if (!p) {
4495 pd = IO_MEM_UNASSIGNED;
4496 } else {
4497 pd = p->phys_offset;
4498 }
3b46e624 4499
bc98a7ef
JM
4500 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4501 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4502 if (p)
4503 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4504#ifdef TARGET_WORDS_BIGENDIAN
4505 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4506 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4507#else
4508 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4509 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4510#endif
4511 } else {
5579c7f3 4512 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4513 (addr & ~TARGET_PAGE_MASK);
4514 stq_p(ptr, val);
4515 }
4516}
4517
8df1cd07 4518/* warning: addr must be aligned */
1e78bcc1
AG
4519static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4520 enum device_endian endian)
8df1cd07
FB
4521{
4522 int io_index;
4523 uint8_t *ptr;
4524 unsigned long pd;
4525 PhysPageDesc *p;
4526
4527 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4528 if (!p) {
4529 pd = IO_MEM_UNASSIGNED;
4530 } else {
4531 pd = p->phys_offset;
4532 }
3b46e624 4533
3a7d929e 4534 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4535 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4536 if (p)
4537 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4538#if defined(TARGET_WORDS_BIGENDIAN)
4539 if (endian == DEVICE_LITTLE_ENDIAN) {
4540 val = bswap32(val);
4541 }
4542#else
4543 if (endian == DEVICE_BIG_ENDIAN) {
4544 val = bswap32(val);
4545 }
4546#endif
8df1cd07
FB
4547 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4548 } else {
4549 unsigned long addr1;
4550 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4551 /* RAM case */
5579c7f3 4552 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4553 switch (endian) {
4554 case DEVICE_LITTLE_ENDIAN:
4555 stl_le_p(ptr, val);
4556 break;
4557 case DEVICE_BIG_ENDIAN:
4558 stl_be_p(ptr, val);
4559 break;
4560 default:
4561 stl_p(ptr, val);
4562 break;
4563 }
3a7d929e
FB
4564 if (!cpu_physical_memory_is_dirty(addr1)) {
4565 /* invalidate code */
4566 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4567 /* set dirty bit */
f7c11b53
YT
4568 cpu_physical_memory_set_dirty_flags(addr1,
4569 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4570 }
8df1cd07
FB
4571 }
4572}
4573
1e78bcc1
AG
4574void stl_phys(target_phys_addr_t addr, uint32_t val)
4575{
4576 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4577}
4578
4579void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4580{
4581 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4582}
4583
4584void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4585{
4586 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4587}
4588
aab33094 4589/* XXX: optimize */
c227f099 4590void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4591{
4592 uint8_t v = val;
4593 cpu_physical_memory_write(addr, &v, 1);
4594}
4595
733f0b02 4596/* warning: addr must be aligned */
1e78bcc1
AG
4597static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4598 enum device_endian endian)
aab33094 4599{
733f0b02
MT
4600 int io_index;
4601 uint8_t *ptr;
4602 unsigned long pd;
4603 PhysPageDesc *p;
4604
4605 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4606 if (!p) {
4607 pd = IO_MEM_UNASSIGNED;
4608 } else {
4609 pd = p->phys_offset;
4610 }
4611
4612 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4613 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4614 if (p)
4615 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4616#if defined(TARGET_WORDS_BIGENDIAN)
4617 if (endian == DEVICE_LITTLE_ENDIAN) {
4618 val = bswap16(val);
4619 }
4620#else
4621 if (endian == DEVICE_BIG_ENDIAN) {
4622 val = bswap16(val);
4623 }
4624#endif
733f0b02
MT
4625 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4626 } else {
4627 unsigned long addr1;
4628 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4629 /* RAM case */
4630 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4631 switch (endian) {
4632 case DEVICE_LITTLE_ENDIAN:
4633 stw_le_p(ptr, val);
4634 break;
4635 case DEVICE_BIG_ENDIAN:
4636 stw_be_p(ptr, val);
4637 break;
4638 default:
4639 stw_p(ptr, val);
4640 break;
4641 }
733f0b02
MT
4642 if (!cpu_physical_memory_is_dirty(addr1)) {
4643 /* invalidate code */
4644 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4645 /* set dirty bit */
4646 cpu_physical_memory_set_dirty_flags(addr1,
4647 (0xff & ~CODE_DIRTY_FLAG));
4648 }
4649 }
aab33094
FB
4650}
4651
1e78bcc1
AG
4652void stw_phys(target_phys_addr_t addr, uint32_t val)
4653{
4654 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4655}
4656
4657void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4658{
4659 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4660}
4661
4662void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4663{
4664 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4665}
4666
aab33094 4667/* XXX: optimize */
c227f099 4668void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4669{
4670 val = tswap64(val);
71d2b725 4671 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4672}
4673
1e78bcc1
AG
4674void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4675{
4676 val = cpu_to_le64(val);
4677 cpu_physical_memory_write(addr, &val, 8);
4678}
4679
4680void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4681{
4682 val = cpu_to_be64(val);
4683 cpu_physical_memory_write(addr, &val, 8);
4684}
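
Editor's note: a short, hypothetical sketch (not part of exec.c) of the endian-explicit accessors above. They are intended for device or firmware code that must read or write guest-physical structures with a fixed byte order regardless of the target CPU's endianness; the descriptor layout and address are illustrative.

static void example_fill_le_descriptor(target_phys_addr_t desc_pa,
                                       uint32_t buf_lo, uint32_t buf_hi,
                                       uint32_t flags)
{
    /* a little-endian, 16-byte in-guest descriptor */
    stl_le_phys(desc_pa + 0, buf_lo);
    stl_le_phys(desc_pa + 4, buf_hi);
    stl_le_phys(desc_pa + 8, flags);
    /* read back the status word the guest may update */
    (void)ldl_le_phys(desc_pa + 12);
}
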
4685
5e2972fd 4686/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4687int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4688 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4689{
4690 int l;
c227f099 4691 target_phys_addr_t phys_addr;
9b3c35e0 4692 target_ulong page;
13eb76e0
FB
4693
4694 while (len > 0) {
4695 page = addr & TARGET_PAGE_MASK;
4696 phys_addr = cpu_get_phys_page_debug(env, page);
4697 /* if no physical page mapped, return an error */
4698 if (phys_addr == -1)
4699 return -1;
4700 l = (page + TARGET_PAGE_SIZE) - addr;
4701 if (l > len)
4702 l = len;
5e2972fd 4703 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4704 if (is_write)
4705 cpu_physical_memory_write_rom(phys_addr, buf, l);
4706 else
5e2972fd 4707 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4708 len -= l;
4709 buf += l;
4710 addr += l;
4711 }
4712 return 0;
4713}
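
Editor's note: a hypothetical sketch (not part of exec.c) of how debugger-style code (e.g. a gdb stub) would use cpu_memory_rw_debug() above: it translates the guest-virtual address via cpu_get_phys_page_debug() and, for writes, goes through cpu_physical_memory_write_rom(). The helper name and address are illustrative.

static uint32_t example_peek_guest_u32(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4] = { 0 };

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return 0;                    /* no mapping for vaddr */
    }
    return ldl_p(buf);               /* interpret in target byte order */
}
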
a68fe89c 4714#endif
13eb76e0 4715
2e70f6ef
PB
4716/* in deterministic execution mode, instructions doing device I/Os
4717 must be at the end of the TB */
4718void cpu_io_recompile(CPUState *env, void *retaddr)
4719{
4720 TranslationBlock *tb;
4721 uint32_t n, cflags;
4722 target_ulong pc, cs_base;
4723 uint64_t flags;
4724
4725 tb = tb_find_pc((unsigned long)retaddr);
4726 if (!tb) {
4727 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4728 retaddr);
4729 }
4730 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4731 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4732 /* Calculate how many instructions had been executed before the fault
bf20dc07 4733 occurred. */
2e70f6ef
PB
4734 n = n - env->icount_decr.u16.low;
4735 /* Generate a new TB ending on the I/O insn. */
4736 n++;
4737 /* On MIPS and SH, delay slot instructions can only be restarted if
4738 they were already the first instruction in the TB. If this is not
bf20dc07 4739 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4740 branch. */
4741#if defined(TARGET_MIPS)
4742 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4743 env->active_tc.PC -= 4;
4744 env->icount_decr.u16.low++;
4745 env->hflags &= ~MIPS_HFLAG_BMASK;
4746 }
4747#elif defined(TARGET_SH4)
4748 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4749 && n > 1) {
4750 env->pc -= 2;
4751 env->icount_decr.u16.low++;
4752 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4753 }
4754#endif
4755 /* This should never happen. */
4756 if (n > CF_COUNT_MASK)
4757 cpu_abort(env, "TB too big during recompile");
4758
4759 cflags = n | CF_LAST_IO;
4760 pc = tb->pc;
4761 cs_base = tb->cs_base;
4762 flags = tb->flags;
4763 tb_phys_invalidate(tb, -1);
4764 /* FIXME: In theory this could raise an exception. In practice
4765 we have already translated the block once so it's probably ok. */
4766 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4767 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4768 the first in the TB) then we end up generating a whole new TB and
4769 repeating the fault, which is horribly inefficient.
4770 Better would be to execute just this insn uncached, or generate a
4771 second new TB. */
4772 cpu_resume_from_signal(env, NULL);
4773}
4774
b3755a91
PB
4775#if !defined(CONFIG_USER_ONLY)
4776
055403b2 4777void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4778{
4779 int i, target_code_size, max_target_code_size;
4780 int direct_jmp_count, direct_jmp2_count, cross_page;
4781 TranslationBlock *tb;
3b46e624 4782
e3db7226
FB
4783 target_code_size = 0;
4784 max_target_code_size = 0;
4785 cross_page = 0;
4786 direct_jmp_count = 0;
4787 direct_jmp2_count = 0;
4788 for(i = 0; i < nb_tbs; i++) {
4789 tb = &tbs[i];
4790 target_code_size += tb->size;
4791 if (tb->size > max_target_code_size)
4792 max_target_code_size = tb->size;
4793 if (tb->page_addr[1] != -1)
4794 cross_page++;
4795 if (tb->tb_next_offset[0] != 0xffff) {
4796 direct_jmp_count++;
4797 if (tb->tb_next_offset[1] != 0xffff) {
4798 direct_jmp2_count++;
4799 }
4800 }
4801 }
4802 /* XXX: avoid using doubles ? */
57fec1fe 4803 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4804 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4805 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4806 cpu_fprintf(f, "TB count %d/%d\n",
4807 nb_tbs, code_gen_max_blocks);
5fafdf24 4808 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4809 nb_tbs ? target_code_size / nb_tbs : 0,
4810 max_target_code_size);
055403b2 4811 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4812 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4813 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4814 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4815 cross_page,
e3db7226
FB
4816 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4817 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4818 direct_jmp_count,
e3db7226
FB
4819 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4820 direct_jmp2_count,
4821 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4822 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4823 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4824 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4825 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4826 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4827}
4828
61382a50 4829#define MMUSUFFIX _cmmu
3917149d 4830#undef GETPC
61382a50
FB
4831#define GETPC() NULL
4832#define env cpu_single_env
b769d8fe 4833#define SOFTMMU_CODE_ACCESS
61382a50
FB
4834
4835#define SHIFT 0
4836#include "softmmu_template.h"
4837
4838#define SHIFT 1
4839#include "softmmu_template.h"
4840
4841#define SHIFT 2
4842#include "softmmu_template.h"
4843
4844#define SHIFT 3
4845#include "softmmu_template.h"
4846
4847#undef env
4848
4849#endif