/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };

static MemoryRegion *system_memory;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

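/* Illustrative sizing (assumed values for concreteness, not a fixed QEMU
   configuration): with TARGET_PHYS_ADDR_SPACE_BITS = 36 and
   TARGET_PAGE_BITS = 12, 24 bits of page index remain; 24 % 10 = 4, so
   P_L1_BITS_REM = 4 is kept as-is: the L1 table has 16 entries and
   P_L1_SHIFT = 20, followed by two 10-bit levels.  With a 32-bit virtual
   address space, 20 % 10 = 0 < 4, so the L1 level absorbs a whole extra
   level: V_L1_BITS = 10, V_L1_SIZE = 1024, V_L1_SHIFT = 10. */
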
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

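/* The POSIX map_exec() rounds its range outward to host page boundaries:
   the start is rounded down and the end rounded up.  For example (with an
   assumed 4 KiB host page), addr = 0x12345 and size = 0x100 yields an
   mprotect() over [0x12000, 0x13000). */
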
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

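/* Sketch of the lookup: a page index is consumed V_L1_BITS at the top
   (the l1_map index), then L2_BITS per intermediate level, with the low
   L2_BITS indexing the final PageDesc array.  E.g., assuming V_L1_SHIFT
   of 20 and L2_BITS of 10, index 0x12345 walks l1_map[0x12345 >> 20],
   then the intermediate table at entry (0x12345 >> 10) & 0x3ff, then
   PageDesc entry 0x12345 & 0x3ff. */
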
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

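/* A typical boot-time call (sketch): cpu_exec_init_all(0) picks the
   default translation-buffer size: DEFAULT_CODE_GEN_BUFFER_SIZE in user
   mode, a quarter of guest RAM otherwise, as implemented above. */
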
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

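/* Note that tbs[] behaves as a bump allocator: tb_alloc() hands out the
   next slot and tb_free() can only reclaim the most recently generated
   TB; anything else stays allocated until the next tb_flush(). */
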
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */
static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;

    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

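/* The low two bits of the tagged pointers manipulated above encode which
   of a TB's two page slots the link belongs to (n == 0 or 1); the value
   2 marks the list head in the circular jump chains.  Masking with ~3
   recovers the real TranslationBlock pointer. */
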
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

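/* Worked example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs
   tab[0] with 0xf8 (bits 3-7) and tab[1] with 0x03 (bits 8-9). */
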
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

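/* The code_gen_ptr advance above is the usual power-of-two round-up,
   (p + size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1).  E.g.,
   assuming a 16-byte CODE_GEN_ALIGN and an aligned starting pointer, a
   0x123-byte block advances the pointer by 0x130 bytes. */
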
/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

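/* Bitmap probe example: for a start offset of 0x123 within the page and
   len = 4, b = code_bitmap[0x24] >> 3, and the test b & 0xf checks
   whether any of page bits 0x123..0x126 are marked as code. */
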
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

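/* The binary search relies on tbs[] being filled in order of increasing
   tc_ptr (code_gen_ptr only grows between flushes), so on a non-exact
   hit &tbs[m_max] is the block whose generated code contains tc_ptr. */
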
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

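/* Usage sketch (hypothetical values): watch 4 bytes at guest address
   0x1000 with GDB semantics; the length must be a power of two <= 8 and
   the address aligned to it, per the sanity check above:

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, 0x1000, 4, BP_GDB | BP_MEM_WRITE, &wp);

   (BP_MEM_WRITE is assumed here to be the write-access flag defined
   alongside BP_GDB in cpu-defs.h.) */
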
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

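/* Each entry above pairs a name accepted by cpu_str_to_log_mask() (see
   further below) with its mask bit; e.g., illustratively, the string
   "in_asm,int" would yield CPU_LOG_TB_IN_ASM | CPU_LOG_INT, while "all"
   enables every item in the table. */
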
f6f3fbca
MT
1728#ifndef CONFIG_USER_ONLY
1729static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1730 = QLIST_HEAD_INITIALIZER(memory_client_list);
1731
1732static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1733 ram_addr_t size,
0fd542fb
MT
1734 ram_addr_t phys_offset,
1735 bool log_dirty)
f6f3fbca
MT
1736{
1737 CPUPhysMemoryClient *client;
1738 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1739 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1740 }
1741}
1742
1743static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1744 target_phys_addr_t end)
f6f3fbca
MT
1745{
1746 CPUPhysMemoryClient *client;
1747 QLIST_FOREACH(client, &memory_client_list, list) {
1748 int r = client->sync_dirty_bitmap(client, start, end);
1749 if (r < 0)
1750 return r;
1751 }
1752 return 0;
1753}
1754
1755static int cpu_notify_migration_log(int enable)
1756{
1757 CPUPhysMemoryClient *client;
1758 QLIST_FOREACH(client, &memory_client_list, list) {
1759 int r = client->migration_log(client, enable);
1760 if (r < 0)
1761 return r;
1762 }
1763 return 0;
1764}
1765
2173a75f
AW
1766struct last_map {
1767 target_phys_addr_t start_addr;
1768 ram_addr_t size;
1769 ram_addr_t phys_offset;
1770};
1771
8d4c78e7
AW
1772/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1773 * address. Each intermediate table provides the next L2_BITs of guest
1774 * physical address space. The number of levels varies based on host and
1775 * guest configuration, making it efficient to build the final guest
1776 * physical address by seeding the L1 offset and shifting and adding in
1777 * each L2 offset as we recurse through them. */
2173a75f
AW
1778static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1779 void **lp, target_phys_addr_t addr,
1780 struct last_map *map)
f6f3fbca 1781{
5cd2c5b6 1782 int i;
f6f3fbca 1783
5cd2c5b6
RH
1784 if (*lp == NULL) {
1785 return;
1786 }
1787 if (level == 0) {
1788 PhysPageDesc *pd = *lp;
8d4c78e7 1789 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1790 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1791 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1792 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1793
1794 if (map->size &&
1795 start_addr == map->start_addr + map->size &&
1796 pd[i].phys_offset == map->phys_offset + map->size) {
1797
1798 map->size += TARGET_PAGE_SIZE;
1799 continue;
1800 } else if (map->size) {
1801 client->set_memory(client, map->start_addr,
1802 map->size, map->phys_offset, false);
1803 }
1804
1805 map->start_addr = start_addr;
1806 map->size = TARGET_PAGE_SIZE;
1807 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1808 }
5cd2c5b6
RH
1809 }
1810 } else {
1811 void **pp = *lp;
7296abac 1812 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1813 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1814 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1815 }
1816 }
1817}
1818
1819static void phys_page_for_each(CPUPhysMemoryClient *client)
1820{
5cd2c5b6 1821 int i;
2173a75f
AW
1822 struct last_map map = { };
1823
5cd2c5b6
RH
1824 for (i = 0; i < P_L1_SIZE; ++i) {
1825 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1826 l1_phys_map + i, i, &map);
1827 }
1828 if (map.size) {
1829 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1830 false);
f6f3fbca 1831 }
f6f3fbca
MT
1832}
1833
1834void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1835{
1836 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1837 phys_page_for_each(client);
1838}
1839
1840void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1841{
1842 QLIST_REMOVE(client, list);
1843}
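/* Illustrative sketch, not part of the original file: a minimal
   CPUPhysMemoryClient. All "example_*" names are made up. Note that
   cpu_register_phys_memory_client() replays every existing mapping
   through phys_page_for_each(), so set_memory() sees the current
   layout once at registration time and incrementally afterwards. */
static void example_set_memory(CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size, ram_addr_t phys_offset,
                               bool log_dirty)
{
    /* react to a (re)mapped physical range */
}

static int example_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                     target_phys_addr_t start,
                                     target_phys_addr_t end)
{
    return 0; /* nothing to sync in this sketch */
}

static int example_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0; /* no migration log state in this sketch */
}

static CPUPhysMemoryClient example_memory_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};
/* usage: cpu_register_phys_memory_client(&example_memory_client); */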
1844#endif
1845
f193c797
FB
1846static int cmp1(const char *s1, int n, const char *s2)
1847{
1848 if (strlen(s2) != n)
1849 return 0;
1850 return memcmp(s1, s2, n) == 0;
1851}
3b46e624 1852
f193c797
FB
1853/* takes a comma-separated list of log masks. Returns 0 on error. */
1854int cpu_str_to_log_mask(const char *str)
1855{
c7cd6a37 1856 const CPULogItem *item;
f193c797
FB
1857 int mask;
1858 const char *p, *p1;
1859
1860 p = str;
1861 mask = 0;
1862 for(;;) {
1863 p1 = strchr(p, ',');
1864 if (!p1)
1865 p1 = p + strlen(p);
9742bf26
YT
1866 if(cmp1(p,p1-p,"all")) {
1867 for(item = cpu_log_items; item->mask != 0; item++) {
1868 mask |= item->mask;
1869 }
1870 } else {
1871 for(item = cpu_log_items; item->mask != 0; item++) {
1872 if (cmp1(p, p1 - p, item->name))
1873 goto found;
1874 }
1875 return 0;
f193c797 1876 }
f193c797
FB
1877 found:
1878 mask |= item->mask;
1879 if (*p1 != ',')
1880 break;
1881 p = p1 + 1;
1882 }
1883 return mask;
1884}
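/* Illustrative sketch, not part of the original file: typical use of
   cpu_str_to_log_mask() when parsing a "-d" style option string such
   as "in_asm,exec". A return of 0 means an unknown item name. The
   cpu_set_log() call assumes the usual logging entry point declared
   alongside these helpers. */
static void example_parse_log_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}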
ea041c0e 1885
7501267e
FB
1886void cpu_abort(CPUState *env, const char *fmt, ...)
1887{
1888 va_list ap;
493ae1f0 1889 va_list ap2;
7501267e
FB
1890
1891 va_start(ap, fmt);
493ae1f0 1892 va_copy(ap2, ap);
7501267e
FB
1893 fprintf(stderr, "qemu: fatal: ");
1894 vfprintf(stderr, fmt, ap);
1895 fprintf(stderr, "\n");
1896#ifdef TARGET_I386
7fe48483
FB
1897 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1898#else
1899 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1900#endif
93fcfe39
AL
1901 if (qemu_log_enabled()) {
1902 qemu_log("qemu: fatal: ");
1903 qemu_log_vprintf(fmt, ap2);
1904 qemu_log("\n");
f9373291 1905#ifdef TARGET_I386
93fcfe39 1906 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1907#else
93fcfe39 1908 log_cpu_state(env, 0);
f9373291 1909#endif
31b1a7b4 1910 qemu_log_flush();
93fcfe39 1911 qemu_log_close();
924edcae 1912 }
493ae1f0 1913 va_end(ap2);
f9373291 1914 va_end(ap);
fd052bf6
RV
1915#if defined(CONFIG_USER_ONLY)
1916 {
1917 struct sigaction act;
1918 sigfillset(&act.sa_mask);
1919 act.sa_handler = SIG_DFL;
1920 sigaction(SIGABRT, &act, NULL);
1921 }
1922#endif
7501267e
FB
1923 abort();
1924}
1925
c5be9f08
TS
1926CPUState *cpu_copy(CPUState *env)
1927{
01ba9816 1928 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1929 CPUState *next_cpu = new_env->next_cpu;
1930 int cpu_index = new_env->cpu_index;
5a38f081
AL
1931#if defined(TARGET_HAS_ICE)
1932 CPUBreakpoint *bp;
1933 CPUWatchpoint *wp;
1934#endif
1935
c5be9f08 1936 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1937
1938 /* Preserve chaining and index. */
c5be9f08
TS
1939 new_env->next_cpu = next_cpu;
1940 new_env->cpu_index = cpu_index;
5a38f081
AL
1941
1942 /* Clone all break/watchpoints.
1943 Note: Once we support ptrace with hw-debug register access, make sure
1944 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1945 QTAILQ_INIT(&env->breakpoints);
1946 QTAILQ_INIT(&env->watchpoints);
5a38f081 1947#if defined(TARGET_HAS_ICE)
72cf2d4f 1948 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1949 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1950 }
72cf2d4f 1951 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1952 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1953 wp->flags, NULL);
1954 }
1955#endif
1956
c5be9f08
TS
1957 return new_env;
1958}
1959
0124311e
FB
1960#if !defined(CONFIG_USER_ONLY)
1961
5c751e99
EI
1962static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1963{
1964 unsigned int i;
1965
1966 /* Discard jump cache entries for any tb which might
1967 overlap the flushed page. */
1968 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1969 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1970 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1971
1972 i = tb_jmp_cache_hash_page(addr);
1973 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1974 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1975}
1976
08738984
IK
1977static CPUTLBEntry s_cputlb_empty_entry = {
1978 .addr_read = -1,
1979 .addr_write = -1,
1980 .addr_code = -1,
1981 .addend = -1,
1982};
1983
ee8b7021
FB
1984/* NOTE: if flush_global is true, also flush global entries (not
1985 implemented yet) */
1986void tlb_flush(CPUState *env, int flush_global)
33417e70 1987{
33417e70 1988 int i;
0124311e 1989
9fa3e853
FB
1990#if defined(DEBUG_TLB)
1991 printf("tlb_flush:\n");
1992#endif
0124311e
FB
1993 /* must reset current TB so that interrupts cannot modify the
1994 links while we are modifying them */
1995 env->current_tb = NULL;
1996
33417e70 1997 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1998 int mmu_idx;
1999 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 2000 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 2001 }
33417e70 2002 }
9fa3e853 2003
8a40a180 2004 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 2005
d4c430a8
PB
2006 env->tlb_flush_addr = -1;
2007 env->tlb_flush_mask = 0;
e3db7226 2008 tlb_flush_count++;
33417e70
FB
2009}
2010
274da6b2 2011static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2012{
5fafdf24 2013 if (addr == (tlb_entry->addr_read &
84b7b8e7 2014 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2015 addr == (tlb_entry->addr_write &
84b7b8e7 2016 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2017 addr == (tlb_entry->addr_code &
84b7b8e7 2018 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2019 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2020 }
61382a50
FB
2021}
2022
2e12669a 2023void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2024{
8a40a180 2025 int i;
cfde4bd9 2026 int mmu_idx;
0124311e 2027
9fa3e853 2028#if defined(DEBUG_TLB)
108c49b8 2029 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2030#endif
d4c430a8
PB
2031 /* Check if we need to flush due to large pages. */
2032 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2033#if defined(DEBUG_TLB)
2034 printf("tlb_flush_page: forced full flush ("
2035 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2036 env->tlb_flush_addr, env->tlb_flush_mask);
2037#endif
2038 tlb_flush(env, 1);
2039 return;
2040 }
0124311e
FB
2041 /* must reset current TB so that interrupts cannot modify the
2042 links while we are modifying them */
2043 env->current_tb = NULL;
61382a50
FB
2044
2045 addr &= TARGET_PAGE_MASK;
2046 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2047 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2048 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2049
5c751e99 2050 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2051}
2052
9fa3e853
FB
2053/* update the TLBs so that writes to code in the virtual page 'addr'
2054 can be detected */
c227f099 2055static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2056{
5fafdf24 2057 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2058 ram_addr + TARGET_PAGE_SIZE,
2059 CODE_DIRTY_FLAG);
9fa3e853
FB
2060}
2061
9fa3e853 2062/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2063 tested for self-modifying code */
c227f099 2064static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2065 target_ulong vaddr)
9fa3e853 2066{
f7c11b53 2067 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2068}
2069
5fafdf24 2070static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2071 unsigned long start, unsigned long length)
2072{
2073 unsigned long addr;
84b7b8e7
FB
2074 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2075 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2076 if ((addr - start) < length) {
0f459d16 2077 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2078 }
2079 }
2080}
2081
5579c7f3 2082/* Note: start and end must be within the same ram block. */
c227f099 2083void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2084 int dirty_flags)
1ccde1cb
FB
2085{
2086 CPUState *env;
4f2ac237 2087 unsigned long length, start1;
f7c11b53 2088 int i;
1ccde1cb
FB
2089
2090 start &= TARGET_PAGE_MASK;
2091 end = TARGET_PAGE_ALIGN(end);
2092
2093 length = end - start;
2094 if (length == 0)
2095 return;
f7c11b53 2096 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2097
1ccde1cb
FB
2098 /* we modify the TLB cache so that the dirty bit will be set again
2099 when accessing the range */
b2e0a138 2100 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2101 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2102 address comparisons below. */
b2e0a138 2103 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2104 != (end - 1) - start) {
2105 abort();
2106 }
2107
6a00d601 2108 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2109 int mmu_idx;
2110 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2111 for(i = 0; i < CPU_TLB_SIZE; i++)
2112 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2113 start1, length);
2114 }
6a00d601 2115 }
1ccde1cb
FB
2116}
2117
74576198
AL
2118int cpu_physical_memory_set_dirty_tracking(int enable)
2119{
f6f3fbca 2120 int ret = 0;
74576198 2121 in_migration = enable;
f6f3fbca
MT
2122 ret = cpu_notify_migration_log(!!enable);
2123 return ret;
74576198
AL
2124}
2125
2126int cpu_physical_memory_get_dirty_tracking(void)
2127{
2128 return in_migration;
2129}
2130
c227f099
AL
2131int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2132 target_phys_addr_t end_addr)
2bec46dc 2133{
7b8f3b78 2134 int ret;
151f7749 2135
f6f3fbca 2136 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2137 return ret;
2bec46dc
AL
2138}
2139
e5896b12
AP
2140int cpu_physical_log_start(target_phys_addr_t start_addr,
2141 ram_addr_t size)
2142{
2143 CPUPhysMemoryClient *client;
2144 QLIST_FOREACH(client, &memory_client_list, list) {
2145 if (client->log_start) {
2146 int r = client->log_start(client, start_addr, size);
2147 if (r < 0) {
2148 return r;
2149 }
2150 }
2151 }
2152 return 0;
2153}
2154
2155int cpu_physical_log_stop(target_phys_addr_t start_addr,
2156 ram_addr_t size)
2157{
2158 CPUPhysMemoryClient *client;
2159 QLIST_FOREACH(client, &memory_client_list, list) {
2160 if (client->log_stop) {
2161 int r = client->log_stop(client, start_addr, size);
2162 if (r < 0) {
2163 return r;
2164 }
2165 }
2166 }
2167 return 0;
2168}
2169
3a7d929e
FB
2170static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2171{
c227f099 2172 ram_addr_t ram_addr;
5579c7f3 2173 void *p;
3a7d929e 2174
84b7b8e7 2175 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2176 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2177 + tlb_entry->addend);
e890261f 2178 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2179 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2180 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2181 }
2182 }
2183}
2184
2185/* update the TLB according to the current state of the dirty bits */
2186void cpu_tlb_update_dirty(CPUState *env)
2187{
2188 int i;
cfde4bd9
IY
2189 int mmu_idx;
2190 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2191 for(i = 0; i < CPU_TLB_SIZE; i++)
2192 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2193 }
3a7d929e
FB
2194}
2195
0f459d16 2196static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2197{
0f459d16
PB
2198 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2199 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2200}
2201
0f459d16
PB
2202/* update the TLB corresponding to virtual page vaddr
2203 so that it is no longer dirty */
2204static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2205{
1ccde1cb 2206 int i;
cfde4bd9 2207 int mmu_idx;
1ccde1cb 2208
0f459d16 2209 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2210 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2211 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2212 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2213}
2214
d4c430a8
PB
2215/* Our TLB does not support large pages, so remember the area covered by
2216 large pages and trigger a full TLB flush if these are invalidated. */
2217static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2218 target_ulong size)
2219{
2220 target_ulong mask = ~(size - 1);
2221
2222 if (env->tlb_flush_addr == (target_ulong)-1) {
2223 env->tlb_flush_addr = vaddr & mask;
2224 env->tlb_flush_mask = mask;
2225 return;
2226 }
2227 /* Extend the existing region to include the new page.
2228 This is a compromise between unnecessary flushes and the cost
2229 of maintaining a full variable size TLB. */
2230 mask &= env->tlb_flush_mask;
2231 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2232 mask <<= 1;
2233 }
2234 env->tlb_flush_addr &= mask;
2235 env->tlb_flush_mask = mask;
2236}
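/* Illustrative worked example, not part of the original file (masks
   shown in the low 32 bits): with 64KB large pages, the first page at
   vaddr 0x110000 records tlb_flush_addr = 0x110000 and
   tlb_flush_mask = 0xffff0000. Adding a second page at 0x140000 gives
   0x110000 ^ 0x140000 = 0x050000, so the loop above widens the mask
   to 0xfff80000 and the tracked region becomes [0x100000, 0x180000),
   covering both pages at the cost of occasionally over-flushing the
   addresses in between. */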
2237
2238/* Add a new TLB entry. At most one entry for a given virtual address
2239 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2240 supplied size is only used by tlb_flush_page. */
2241void tlb_set_page(CPUState *env, target_ulong vaddr,
2242 target_phys_addr_t paddr, int prot,
2243 int mmu_idx, target_ulong size)
9fa3e853 2244{
92e873b9 2245 PhysPageDesc *p;
4f2ac237 2246 unsigned long pd;
9fa3e853 2247 unsigned int index;
4f2ac237 2248 target_ulong address;
0f459d16 2249 target_ulong code_address;
355b1943 2250 unsigned long addend;
84b7b8e7 2251 CPUTLBEntry *te;
a1d1bb31 2252 CPUWatchpoint *wp;
c227f099 2253 target_phys_addr_t iotlb;
9fa3e853 2254
d4c430a8
PB
2255 assert(size >= TARGET_PAGE_SIZE);
2256 if (size != TARGET_PAGE_SIZE) {
2257 tlb_add_large_page(env, vaddr, size);
2258 }
92e873b9 2259 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2260 if (!p) {
2261 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2262 } else {
2263 pd = p->phys_offset;
9fa3e853
FB
2264 }
2265#if defined(DEBUG_TLB)
7fd3f494
SW
2266 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2267 " prot=%x idx=%d pd=0x%08lx\n",
2268 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2269#endif
2270
0f459d16
PB
2271 address = vaddr;
2272 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2273 /* IO memory case (romd handled later) */
2274 address |= TLB_MMIO;
2275 }
5579c7f3 2276 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2277 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2278 /* Normal RAM. */
2279 iotlb = pd & TARGET_PAGE_MASK;
2280 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2281 iotlb |= IO_MEM_NOTDIRTY;
2282 else
2283 iotlb |= IO_MEM_ROM;
2284 } else {
ccbb4d44 2285 /* IO handlers are currently passed a physical address.
0f459d16
PB
2286 It would be nice to pass an offset from the base address
2287 of that region. This would avoid having to special case RAM,
2288 and avoid full address decoding in every device.
2289 We can't use the high bits of pd for this because
2290 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2291 iotlb = (pd & ~TARGET_PAGE_MASK);
2292 if (p) {
8da3ff18
PB
2293 iotlb += p->region_offset;
2294 } else {
2295 iotlb += paddr;
2296 }
0f459d16
PB
2297 }
2298
2299 code_address = address;
2300 /* Make accesses to pages with watchpoints go via the
2301 watchpoint trap routines. */
72cf2d4f 2302 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2303 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2304 /* Avoid trapping reads of pages with a write breakpoint. */
2305 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2306 iotlb = io_mem_watch + paddr;
2307 address |= TLB_MMIO;
2308 break;
2309 }
6658ffb8 2310 }
0f459d16 2311 }
d79acba4 2312
0f459d16
PB
2313 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2314 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2315 te = &env->tlb_table[mmu_idx][index];
2316 te->addend = addend - vaddr;
2317 if (prot & PAGE_READ) {
2318 te->addr_read = address;
2319 } else {
2320 te->addr_read = -1;
2321 }
5c751e99 2322
0f459d16
PB
2323 if (prot & PAGE_EXEC) {
2324 te->addr_code = code_address;
2325 } else {
2326 te->addr_code = -1;
2327 }
2328 if (prot & PAGE_WRITE) {
2329 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2330 (pd & IO_MEM_ROMD)) {
2331 /* Write access calls the I/O callback. */
2332 te->addr_write = address | TLB_MMIO;
2333 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2334 !cpu_physical_memory_is_dirty(pd)) {
2335 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2336 } else {
0f459d16 2337 te->addr_write = address;
9fa3e853 2338 }
0f459d16
PB
2339 } else {
2340 te->addr_write = -1;
9fa3e853 2341 }
9fa3e853
FB
2342}
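/* Illustrative sketch, not part of the original file: how a softmmu
   fast path consumes the entry filled in above. te->addend was stored
   as (host_base - vaddr), so a hit costs one compare and one add. Any
   flag bit left in addr_read (TLB_MMIO, TLB_INVALID_MASK) makes the
   direct path bail out; the real softmmu templates then go through
   the I/O handlers or tlb_fill(). */
static inline void *example_tlb_lookup_read(CPUState *env, int mmu_idx,
                                            target_ulong vaddr)
{
    int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    if ((vaddr & TARGET_PAGE_MASK) ==
        (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (te->addr_read & ~TARGET_PAGE_MASK) {
            return NULL; /* TLB_MMIO page: must use the I/O path */
        }
        return (void *)(unsigned long)(vaddr + te->addend);
    }
    return NULL; /* miss: the real code calls tlb_fill() and retries */
}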
2343
0124311e
FB
2344#else
2345
ee8b7021 2346void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2347{
2348}
2349
2e12669a 2350void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2351{
2352}
2353
edf8e2af
MW
2354/*
2355 * Walks guest process memory "regions" one by one
2356 * and calls callback function 'fn' for each region.
2357 */
5cd2c5b6
RH
2358
2359struct walk_memory_regions_data
2360{
2361 walk_memory_regions_fn fn;
2362 void *priv;
2363 unsigned long start;
2364 int prot;
2365};
2366
2367static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2368 abi_ulong end, int new_prot)
5cd2c5b6
RH
2369{
2370 if (data->start != -1ul) {
2371 int rc = data->fn(data->priv, data->start, end, data->prot);
2372 if (rc != 0) {
2373 return rc;
2374 }
2375 }
2376
2377 data->start = (new_prot ? end : -1ul);
2378 data->prot = new_prot;
2379
2380 return 0;
2381}
2382
2383static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2384 abi_ulong base, int level, void **lp)
5cd2c5b6 2385{
b480d9b7 2386 abi_ulong pa;
5cd2c5b6
RH
2387 int i, rc;
2388
2389 if (*lp == NULL) {
2390 return walk_memory_regions_end(data, base, 0);
2391 }
2392
2393 if (level == 0) {
2394 PageDesc *pd = *lp;
7296abac 2395 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2396 int prot = pd[i].flags;
2397
2398 pa = base | (i << TARGET_PAGE_BITS);
2399 if (prot != data->prot) {
2400 rc = walk_memory_regions_end(data, pa, prot);
2401 if (rc != 0) {
2402 return rc;
9fa3e853 2403 }
9fa3e853 2404 }
5cd2c5b6
RH
2405 }
2406 } else {
2407 void **pp = *lp;
7296abac 2408 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2409 pa = base | ((abi_ulong)i <<
2410 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2411 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2412 if (rc != 0) {
2413 return rc;
2414 }
2415 }
2416 }
2417
2418 return 0;
2419}
2420
2421int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2422{
2423 struct walk_memory_regions_data data;
2424 unsigned long i;
2425
2426 data.fn = fn;
2427 data.priv = priv;
2428 data.start = -1ul;
2429 data.prot = 0;
2430
2431 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2432 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2433 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2434 if (rc != 0) {
2435 return rc;
9fa3e853 2436 }
33417e70 2437 }
5cd2c5b6
RH
2438
2439 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2440}
2441
b480d9b7
PB
2442static int dump_region(void *priv, abi_ulong start,
2443 abi_ulong end, unsigned long prot)
edf8e2af
MW
2444{
2445 FILE *f = (FILE *)priv;
2446
b480d9b7
PB
2447 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2448 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2449 start, end, end - start,
2450 ((prot & PAGE_READ) ? 'r' : '-'),
2451 ((prot & PAGE_WRITE) ? 'w' : '-'),
2452 ((prot & PAGE_EXEC) ? 'x' : '-'));
2453
2454 return (0);
2455}
2456
2457/* dump memory mappings */
2458void page_dump(FILE *f)
2459{
2460 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2461 "start", "end", "size", "prot");
2462 walk_memory_regions(f, dump_region);
33417e70
FB
2463}
2464
53a5960a 2465int page_get_flags(target_ulong address)
33417e70 2466{
9fa3e853
FB
2467 PageDesc *p;
2468
2469 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2470 if (!p)
9fa3e853
FB
2471 return 0;
2472 return p->flags;
2473}
2474
376a7909
RH
2475/* Modify the flags of a page and invalidate the code if necessary.
2476 The flag PAGE_WRITE_ORG is positioned automatically depending
2477 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2478void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2479{
376a7909
RH
2480 target_ulong addr, len;
2481
2482 /* This function should never be called with addresses outside the
2483 guest address space. If this assert fires, it probably indicates
2484 a missing call to h2g_valid. */
b480d9b7
PB
2485#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2486 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2487#endif
2488 assert(start < end);
9fa3e853
FB
2489
2490 start = start & TARGET_PAGE_MASK;
2491 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2492
2493 if (flags & PAGE_WRITE) {
9fa3e853 2494 flags |= PAGE_WRITE_ORG;
376a7909
RH
2495 }
2496
2497 for (addr = start, len = end - start;
2498 len != 0;
2499 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2500 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2501
2502 /* If the write protection bit is set, then we invalidate
2503 the code inside. */
5fafdf24 2504 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2505 (flags & PAGE_WRITE) &&
2506 p->first_tb) {
d720b93d 2507 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2508 }
2509 p->flags = flags;
2510 }
33417e70
FB
2511}
2512
3d97b40b
TS
2513int page_check_range(target_ulong start, target_ulong len, int flags)
2514{
2515 PageDesc *p;
2516 target_ulong end;
2517 target_ulong addr;
2518
376a7909
RH
2519 /* This function should never be called with addresses outside the
2520 guest address space. If this assert fires, it probably indicates
2521 a missing call to h2g_valid. */
338e9e6c
BS
2522#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2523 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2524#endif
2525
3e0650a9
RH
2526 if (len == 0) {
2527 return 0;
2528 }
376a7909
RH
2529 if (start + len - 1 < start) {
2530 /* We've wrapped around. */
55f280c9 2531 return -1;
376a7909 2532 }
55f280c9 2533
3d97b40b
TS
2534 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2535 start = start & TARGET_PAGE_MASK;
2536
376a7909
RH
2537 for (addr = start, len = end - start;
2538 len != 0;
2539 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2540 p = page_find(addr >> TARGET_PAGE_BITS);
2541 if( !p )
2542 return -1;
2543 if( !(p->flags & PAGE_VALID) )
2544 return -1;
2545
dae3270c 2546 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2547 return -1;
dae3270c
FB
2548 if (flags & PAGE_WRITE) {
2549 if (!(p->flags & PAGE_WRITE_ORG))
2550 return -1;
2551 /* unprotect the page if it was put read-only because it
2552 contains translated code */
2553 if (!(p->flags & PAGE_WRITE)) {
2554 if (!page_unprotect(addr, 0, NULL))
2555 return -1;
2556 }
2557 return 0;
2558 }
3d97b40b
TS
2559 }
2560 return 0;
2561}
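/* Illustrative sketch, not part of the original file: user-mode code
   typically validates a guest buffer with page_check_range() before
   touching it from the host side, e.g. while implementing a syscall
   that reads or writes guest memory. */
static int example_guest_buffer_ok(target_ulong guest_addr,
                                   target_ulong len, int is_write)
{
    int flags = is_write ? PAGE_WRITE : PAGE_READ;
    return page_check_range(guest_addr, len, flags) == 0;
}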
2562
9fa3e853 2563/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2564 page. Return TRUE if the fault was successfully handled. */
53a5960a 2565int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2566{
45d679d6
AJ
2567 unsigned int prot;
2568 PageDesc *p;
53a5960a 2569 target_ulong host_start, host_end, addr;
9fa3e853 2570
c8a706fe
PB
2571 /* Technically this isn't safe inside a signal handler. However we
2572 know this only ever happens in a synchronous SEGV handler, so in
2573 practice it seems to be ok. */
2574 mmap_lock();
2575
45d679d6
AJ
2576 p = page_find(address >> TARGET_PAGE_BITS);
2577 if (!p) {
c8a706fe 2578 mmap_unlock();
9fa3e853 2579 return 0;
c8a706fe 2580 }
45d679d6 2581
9fa3e853
FB
2582 /* if the page was really writable, then we change its
2583 protection back to writable */
45d679d6
AJ
2584 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2585 host_start = address & qemu_host_page_mask;
2586 host_end = host_start + qemu_host_page_size;
2587
2588 prot = 0;
2589 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2590 p = page_find(addr >> TARGET_PAGE_BITS);
2591 p->flags |= PAGE_WRITE;
2592 prot |= p->flags;
2593
9fa3e853
FB
2594 /* and since the content will be modified, we must invalidate
2595 the corresponding translated code. */
45d679d6 2596 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2597#ifdef DEBUG_TB_CHECK
45d679d6 2598 tb_invalidate_check(addr);
9fa3e853 2599#endif
9fa3e853 2600 }
45d679d6
AJ
2601 mprotect((void *)g2h(host_start), qemu_host_page_size,
2602 prot & PAGE_BITS);
2603
2604 mmap_unlock();
2605 return 1;
9fa3e853 2606 }
c8a706fe 2607 mmap_unlock();
9fa3e853
FB
2608 return 0;
2609}
2610
6a00d601
FB
2611static inline void tlb_set_dirty(CPUState *env,
2612 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2613{
2614}
9fa3e853
FB
2615#endif /* defined(CONFIG_USER_ONLY) */
2616
e2eef170 2617#if !defined(CONFIG_USER_ONLY)
8da3ff18 2618
c04b2b78
PB
2619#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2620typedef struct subpage_t {
2621 target_phys_addr_t base;
f6405247
RH
2622 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2623 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2624} subpage_t;
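/* Explanatory note, not part of the original file: SUBPAGE_IDX() is
   the byte offset within the page, so sub_io_index[] and
   region_offset[] hold one entry per byte. That is memory-hungry,
   but it turns the lookup in subpage_readlen()/subpage_writelen()
   below into a single array access with no range search. */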
2625
c227f099
AL
2626static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2627 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2628static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2629 ram_addr_t orig_memory,
2630 ram_addr_t region_offset);
db7b5426
BS
2631#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2632 need_subpage) \
2633 do { \
2634 if (addr > start_addr) \
2635 start_addr2 = 0; \
2636 else { \
2637 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2638 if (start_addr2 > 0) \
2639 need_subpage = 1; \
2640 } \
2641 \
49e9fba2 2642 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2643 end_addr2 = TARGET_PAGE_SIZE - 1; \
2644 else { \
2645 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2646 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2647 need_subpage = 1; \
2648 } \
2649 } while (0)
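/* Illustrative worked example, not part of the original file: with
   4KB target pages, registering start_addr = 0x1080 with
   orig_size = 0x100 and examining the page at addr = 0x1000 yields
   start_addr2 = 0x080 and end_addr2 = 0x17f, with need_subpage = 1:
   the region covers only bytes 0x080..0x17f of that page, so a
   subpage must mediate its accesses. */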
2650
8f2498f9
MT
2651/* register physical memory.
2652 For RAM, 'size' must be a multiple of the target page size.
2653 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2654 io memory page. The address used when calling the IO function is
2655 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2656 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2657 before calculating this offset. This should not be a problem unless
2658 the low bits of start_addr and region_offset differ. */
0fd542fb 2659void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2660 ram_addr_t size,
2661 ram_addr_t phys_offset,
0fd542fb
MT
2662 ram_addr_t region_offset,
2663 bool log_dirty)
33417e70 2664{
c227f099 2665 target_phys_addr_t addr, end_addr;
92e873b9 2666 PhysPageDesc *p;
9d42037b 2667 CPUState *env;
c227f099 2668 ram_addr_t orig_size = size;
f6405247 2669 subpage_t *subpage;
33417e70 2670
3b8e6a2d 2671 assert(size);
0fd542fb 2672 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2673
67c4d23c
PB
2674 if (phys_offset == IO_MEM_UNASSIGNED) {
2675 region_offset = start_addr;
2676 }
8da3ff18 2677 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2678 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2679 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2680
2681 addr = start_addr;
2682 do {
db7b5426
BS
2683 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2684 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2685 ram_addr_t orig_memory = p->phys_offset;
2686 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2687 int need_subpage = 0;
2688
2689 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2690 need_subpage);
f6405247 2691 if (need_subpage) {
db7b5426
BS
2692 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2693 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2694 &p->phys_offset, orig_memory,
2695 p->region_offset);
db7b5426
BS
2696 } else {
2697 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2698 >> IO_MEM_SHIFT];
2699 }
8da3ff18
PB
2700 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2701 region_offset);
2702 p->region_offset = 0;
db7b5426
BS
2703 } else {
2704 p->phys_offset = phys_offset;
2705 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2706 (phys_offset & IO_MEM_ROMD))
2707 phys_offset += TARGET_PAGE_SIZE;
2708 }
2709 } else {
2710 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2711 p->phys_offset = phys_offset;
8da3ff18 2712 p->region_offset = region_offset;
db7b5426 2713 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2714 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2715 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2716 } else {
c227f099 2717 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2718 int need_subpage = 0;
2719
2720 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2721 end_addr2, need_subpage);
2722
f6405247 2723 if (need_subpage) {
db7b5426 2724 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2725 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2726 addr & TARGET_PAGE_MASK);
db7b5426 2727 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2728 phys_offset, region_offset);
2729 p->region_offset = 0;
db7b5426
BS
2730 }
2731 }
2732 }
8da3ff18 2733 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2734 addr += TARGET_PAGE_SIZE;
2735 } while (addr != end_addr);
3b46e624 2736
9d42037b
FB
2737 /* since each CPU stores ram addresses in its TLB cache, we must
2738 reset the modified entries */
2739 /* XXX: slow ! */
2740 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2741 tlb_flush(env, 1);
2742 }
33417e70
FB
2743}
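/* Illustrative sketch, not part of the original file: registering a
   freshly allocated RAM block at a fixed guest-physical base; the
   names and the IO_MEM_RAM tagging follow the conventions used above.
   Callers in this tree normally go through the
   cpu_register_physical_memory() wrapper, which supplies
   region_offset = 0 and log_dirty = false. */
static void example_map_ram(target_phys_addr_t base, ram_addr_t size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(NULL, "example.ram", size);
    cpu_register_physical_memory_log(base, size, ram_offset | IO_MEM_RAM,
                                     0, false);
}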
2744
ba863458 2745/* XXX: temporary until new memory mapping API */
c227f099 2746ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2747{
2748 PhysPageDesc *p;
2749
2750 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2751 if (!p)
2752 return IO_MEM_UNASSIGNED;
2753 return p->phys_offset;
2754}
2755
c227f099 2756void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2757{
2758 if (kvm_enabled())
2759 kvm_coalesce_mmio_region(addr, size);
2760}
2761
c227f099 2762void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2763{
2764 if (kvm_enabled())
2765 kvm_uncoalesce_mmio_region(addr, size);
2766}
2767
62a2744c
SY
2768void qemu_flush_coalesced_mmio_buffer(void)
2769{
2770 if (kvm_enabled())
2771 kvm_flush_coalesced_mmio_buffer();
2772}
2773
c902760f
MT
2774#if defined(__linux__) && !defined(TARGET_S390X)
2775
2776#include <sys/vfs.h>
2777
2778#define HUGETLBFS_MAGIC 0x958458f6
2779
2780static long gethugepagesize(const char *path)
2781{
2782 struct statfs fs;
2783 int ret;
2784
2785 do {
9742bf26 2786 ret = statfs(path, &fs);
c902760f
MT
2787 } while (ret != 0 && errno == EINTR);
2788
2789 if (ret != 0) {
9742bf26
YT
2790 perror(path);
2791 return 0;
c902760f
MT
2792 }
2793
2794 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2795 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2796
2797 return fs.f_bsize;
2798}
2799
04b16653
AW
2800static void *file_ram_alloc(RAMBlock *block,
2801 ram_addr_t memory,
2802 const char *path)
c902760f
MT
2803{
2804 char *filename;
2805 void *area;
2806 int fd;
2807#ifdef MAP_POPULATE
2808 int flags;
2809#endif
2810 unsigned long hpagesize;
2811
2812 hpagesize = gethugepagesize(path);
2813 if (!hpagesize) {
9742bf26 2814 return NULL;
c902760f
MT
2815 }
2816
2817 if (memory < hpagesize) {
2818 return NULL;
2819 }
2820
2821 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2822 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2823 return NULL;
2824 }
2825
2826 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2827 return NULL;
c902760f
MT
2828 }
2829
2830 fd = mkstemp(filename);
2831 if (fd < 0) {
9742bf26
YT
2832 perror("unable to create backing store for hugepages");
2833 free(filename);
2834 return NULL;
c902760f
MT
2835 }
2836 unlink(filename);
2837 free(filename);
2838
2839 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2840
2841 /*
2842 * ftruncate is not supported by hugetlbfs in older
2843 * hosts, so don't bother bailing out on errors.
2844 * If anything goes wrong with it under other filesystems,
2845 * mmap will fail.
2846 */
2847 if (ftruncate(fd, memory))
9742bf26 2848 perror("ftruncate");
c902760f
MT
2849
2850#ifdef MAP_POPULATE
2851 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages when
2852 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2853 * to sidestep this quirk.
2854 */
2855 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2856 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2857#else
2858 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2859#endif
2860 if (area == MAP_FAILED) {
9742bf26
YT
2861 perror("file_ram_alloc: can't mmap RAM pages");
2862 close(fd);
2863 return (NULL);
c902760f 2864 }
04b16653 2865 block->fd = fd;
c902760f
MT
2866 return area;
2867}
2868#endif
2869
d17b5288 2870static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2871{
2872 RAMBlock *block, *next_block;
09d7ae90 2873 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2874
2875 if (QLIST_EMPTY(&ram_list.blocks))
2876 return 0;
2877
2878 QLIST_FOREACH(block, &ram_list.blocks, next) {
2879 ram_addr_t end, next = ULONG_MAX;
2880
2881 end = block->offset + block->length;
2882
2883 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2884 if (next_block->offset >= end) {
2885 next = MIN(next, next_block->offset);
2886 }
2887 }
2888 if (next - end >= size && next - end < mingap) {
2889 offset = end;
2890 mingap = next - end;
2891 }
2892 }
2893 return offset;
2894}
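/* Illustrative worked example, not part of the original file: with
   blocks at [0, 0x20000) and [0x30000, 0x40000), a request for
   size 0x8000 sees two candidate gaps: 0x10000 bytes at 0x20000 and
   the unbounded tail after 0x40000. The smallest gap that still fits
   wins (mingap), so the new block lands at 0x20000, keeping the
   ram_addr_t space compact. */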
2895
2896static ram_addr_t last_ram_offset(void)
d17b5288
AW
2897{
2898 RAMBlock *block;
2899 ram_addr_t last = 0;
2900
2901 QLIST_FOREACH(block, &ram_list.blocks, next)
2902 last = MAX(last, block->offset + block->length);
2903
2904 return last;
2905}
2906
84b89d78 2907ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2908 ram_addr_t size, void *host)
84b89d78
CM
2909{
2910 RAMBlock *new_block, *block;
2911
2912 size = TARGET_PAGE_ALIGN(size);
2913 new_block = qemu_mallocz(sizeof(*new_block));
2914
2915 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2916 char *id = dev->parent_bus->info->get_dev_path(dev);
2917 if (id) {
2918 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2919 qemu_free(id);
2920 }
2921 }
2922 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2923
2924 QLIST_FOREACH(block, &ram_list.blocks, next) {
2925 if (!strcmp(block->idstr, new_block->idstr)) {
2926 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2927 new_block->idstr);
2928 abort();
2929 }
2930 }
2931
432d268c 2932 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2933 if (host) {
2934 new_block->host = host;
cd19cfa2 2935 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2936 } else {
2937 if (mem_path) {
c902760f 2938#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2939 new_block->host = file_ram_alloc(new_block, size, mem_path);
2940 if (!new_block->host) {
2941 new_block->host = qemu_vmalloc(size);
e78815a5 2942 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2943 }
c902760f 2944#else
6977dfe6
YT
2945 fprintf(stderr, "-mem-path option unsupported\n");
2946 exit(1);
c902760f 2947#endif
6977dfe6 2948 } else {
6b02494d 2949#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2950 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2951 a system-defined value, which is at least 256GB. Larger systems
2952 have larger values. We put the guest between the end of the data
2953 segment (system break) and this value. We use 32GB as a base to
2954 have enough room for the system break to grow. */
2955 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2956 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2957 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2958 if (new_block->host == MAP_FAILED) {
2959 fprintf(stderr, "Allocating RAM failed\n");
2960 abort();
2961 }
6b02494d 2962#else
868bb33f 2963 if (xen_enabled()) {
432d268c
JN
2964 xen_ram_alloc(new_block->offset, size);
2965 } else {
2966 new_block->host = qemu_vmalloc(size);
2967 }
6b02494d 2968#endif
e78815a5 2969 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2970 }
c902760f 2971 }
94a6b54f
PB
2972 new_block->length = size;
2973
f471a17e 2974 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2975
f471a17e 2976 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2977 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2978 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2979 0xff, size >> TARGET_PAGE_BITS);
2980
6f0437e8
JK
2981 if (kvm_enabled())
2982 kvm_setup_guest_memory(new_block->host, size);
2983
94a6b54f
PB
2984 return new_block->offset;
2985}
e9a1ab19 2986
6977dfe6
YT
2987ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2988{
2989 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2990}
2991
1f2e98b6
AW
2992void qemu_ram_free_from_ptr(ram_addr_t addr)
2993{
2994 RAMBlock *block;
2995
2996 QLIST_FOREACH(block, &ram_list.blocks, next) {
2997 if (addr == block->offset) {
2998 QLIST_REMOVE(block, next);
2999 qemu_free(block);
3000 return;
3001 }
3002 }
3003}
3004
c227f099 3005void qemu_ram_free(ram_addr_t addr)
e9a1ab19 3006{
04b16653
AW
3007 RAMBlock *block;
3008
3009 QLIST_FOREACH(block, &ram_list.blocks, next) {
3010 if (addr == block->offset) {
3011 QLIST_REMOVE(block, next);
cd19cfa2
HY
3012 if (block->flags & RAM_PREALLOC_MASK) {
3013 ;
3014 } else if (mem_path) {
04b16653
AW
3015#if defined (__linux__) && !defined(TARGET_S390X)
3016 if (block->fd) {
3017 munmap(block->host, block->length);
3018 close(block->fd);
3019 } else {
3020 qemu_vfree(block->host);
3021 }
fd28aa13
JK
3022#else
3023 abort();
04b16653
AW
3024#endif
3025 } else {
3026#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3027 munmap(block->host, block->length);
3028#else
868bb33f 3029 if (xen_enabled()) {
e41d7c69 3030 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3031 } else {
3032 qemu_vfree(block->host);
3033 }
04b16653
AW
3034#endif
3035 }
3036 qemu_free(block);
3037 return;
3038 }
3039 }
3040
e9a1ab19
FB
3041}
3042
cd19cfa2
HY
3043#ifndef _WIN32
3044void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3045{
3046 RAMBlock *block;
3047 ram_addr_t offset;
3048 int flags;
3049 void *area, *vaddr;
3050
3051 QLIST_FOREACH(block, &ram_list.blocks, next) {
3052 offset = addr - block->offset;
3053 if (offset < block->length) {
3054 vaddr = block->host + offset;
3055 if (block->flags & RAM_PREALLOC_MASK) {
3056 ;
3057 } else {
3058 flags = MAP_FIXED;
3059 munmap(vaddr, length);
3060 if (mem_path) {
3061#if defined(__linux__) && !defined(TARGET_S390X)
3062 if (block->fd) {
3063#ifdef MAP_POPULATE
3064 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3065 MAP_PRIVATE;
3066#else
3067 flags |= MAP_PRIVATE;
3068#endif
3069 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3070 flags, block->fd, offset);
3071 } else {
3072 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3073 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3074 flags, -1, 0);
3075 }
fd28aa13
JK
3076#else
3077 abort();
cd19cfa2
HY
3078#endif
3079 } else {
3080#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3081 flags |= MAP_SHARED | MAP_ANONYMOUS;
3082 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3083 flags, -1, 0);
3084#else
3085 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3086 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3087 flags, -1, 0);
3088#endif
3089 }
3090 if (area != vaddr) {
3091 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3092 length, addr);
3093 exit(1);
3094 }
3095 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3096 }
3097 return;
3098 }
3099 }
3100}
3101#endif /* !_WIN32 */
3102
dc828ca1 3103/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3104 With the exception of the softmmu code in this file, this should
3105 only be used for local memory (e.g. video ram) that the device owns,
3106 and knows it isn't going to access beyond the end of the block.
3107
3108 It should not be used for general purpose DMA.
3109 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3110 */
c227f099 3111void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3112{
94a6b54f
PB
3113 RAMBlock *block;
3114
f471a17e
AW
3115 QLIST_FOREACH(block, &ram_list.blocks, next) {
3116 if (addr - block->offset < block->length) {
7d82af38
VP
3117 /* Move this entry to the start of the list. */
3118 if (block != QLIST_FIRST(&ram_list.blocks)) {
3119 QLIST_REMOVE(block, next);
3120 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3121 }
868bb33f 3122 if (xen_enabled()) {
432d268c
JN
3123 /* We need to check if the requested address is in RAM
3124 * because we don't want to map the entire memory in QEMU.
712c2b41 3125 * In that case just map until the end of the page.
432d268c
JN
3126 */
3127 if (block->offset == 0) {
e41d7c69 3128 return xen_map_cache(addr, 0, 0);
432d268c 3129 } else if (block->host == NULL) {
e41d7c69
JK
3130 block->host =
3131 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3132 }
3133 }
f471a17e
AW
3134 return block->host + (addr - block->offset);
3135 }
94a6b54f 3136 }
f471a17e
AW
3137
3138 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3139 abort();
3140
3141 return NULL;
dc828ca1
PB
3142}
3143
b2e0a138
MT
3144/* Return a host pointer to ram allocated with qemu_ram_alloc.
3145 * Same as qemu_get_ram_ptr, but avoids reordering ramblocks.
3146 */
3147void *qemu_safe_ram_ptr(ram_addr_t addr)
3148{
3149 RAMBlock *block;
3150
3151 QLIST_FOREACH(block, &ram_list.blocks, next) {
3152 if (addr - block->offset < block->length) {
868bb33f 3153 if (xen_enabled()) {
432d268c
JN
3154 /* We need to check if the requested address is in RAM
3155 * because we don't want to map the entire memory in QEMU.
712c2b41 3156 * In that case just map until the end of the page.
432d268c
JN
3157 */
3158 if (block->offset == 0) {
e41d7c69 3159 return xen_map_cache(addr, 0, 0);
432d268c 3160 } else if (block->host == NULL) {
e41d7c69
JK
3161 block->host =
3162 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3163 }
3164 }
b2e0a138
MT
3165 return block->host + (addr - block->offset);
3166 }
3167 }
3168
3169 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3170 abort();
3171
3172 return NULL;
3173}
3174
38bee5dc
SS
3175/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3176 * but takes a size argument */
8ab934f9 3177void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3178{
8ab934f9
SS
3179 if (*size == 0) {
3180 return NULL;
3181 }
868bb33f 3182 if (xen_enabled()) {
e41d7c69 3183 return xen_map_cache(addr, *size, 1);
868bb33f 3184 } else {
38bee5dc
SS
3185 RAMBlock *block;
3186
3187 QLIST_FOREACH(block, &ram_list.blocks, next) {
3188 if (addr - block->offset < block->length) {
3189 if (addr - block->offset + *size > block->length)
3190 *size = block->length - addr + block->offset;
3191 return block->host + (addr - block->offset);
3192 }
3193 }
3194
3195 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3196 abort();
38bee5dc
SS
3197 }
3198}
3199
050a0ddf
AP
3200void qemu_put_ram_ptr(void *addr)
3201{
3202 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3203}
3204
e890261f 3205int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3206{
94a6b54f
PB
3207 RAMBlock *block;
3208 uint8_t *host = ptr;
3209
868bb33f 3210 if (xen_enabled()) {
e41d7c69 3211 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3212 return 0;
3213 }
3214
f471a17e 3215 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3216 /* This case happens when the block is not mapped. */
3217 if (block->host == NULL) {
3218 continue;
3219 }
f471a17e 3220 if (host - block->host < block->length) {
e890261f
MT
3221 *ram_addr = block->offset + (host - block->host);
3222 return 0;
f471a17e 3223 }
94a6b54f 3224 }
432d268c 3225
e890261f
MT
3226 return -1;
3227}
f471a17e 3228
e890261f
MT
3229/* Some of the softmmu routines need to translate from a host pointer
3230 (typically a TLB entry) back to a ram offset. */
3231ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3232{
3233 ram_addr_t ram_addr;
f471a17e 3234
e890261f
MT
3235 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3236 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3237 abort();
3238 }
3239 return ram_addr;
5579c7f3
PB
3240}
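/* Illustrative sketch, not part of the original file: the two
   translation directions are inverses for any offset inside a
   block, so a host pointer produced by qemu_get_ram_ptr() maps back
   to the ram_addr_t it came from. */
static void example_ram_round_trip(ram_addr_t ram_addr)
{
    void *host = qemu_get_ram_ptr(ram_addr);
    assert(qemu_ram_addr_from_host_nofail(host) == ram_addr);
}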
3241
c227f099 3242static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3243{
67d3b957 3244#ifdef DEBUG_UNASSIGNED
ab3d1727 3245 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3246#endif
5b450407 3247#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3248 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3249#endif
3250 return 0;
3251}
3252
c227f099 3253static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3254{
3255#ifdef DEBUG_UNASSIGNED
3256 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3257#endif
5b450407 3258#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3259 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3260#endif
3261 return 0;
3262}
3263
c227f099 3264static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3265{
3266#ifdef DEBUG_UNASSIGNED
3267 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3268#endif
5b450407 3269#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3270 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3271#endif
33417e70
FB
3272 return 0;
3273}
3274
c227f099 3275static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3276{
67d3b957 3277#ifdef DEBUG_UNASSIGNED
ab3d1727 3278 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3279#endif
5b450407 3280#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3281 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3282#endif
3283}
3284
c227f099 3285static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3286{
3287#ifdef DEBUG_UNASSIGNED
3288 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3289#endif
5b450407 3290#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3291 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3292#endif
3293}
3294
c227f099 3295static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3296{
3297#ifdef DEBUG_UNASSIGNED
3298 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3299#endif
5b450407 3300#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3301 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3302#endif
33417e70
FB
3303}
3304
d60efc6b 3305static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3306 unassigned_mem_readb,
e18231a3
BS
3307 unassigned_mem_readw,
3308 unassigned_mem_readl,
33417e70
FB
3309};
3310
d60efc6b 3311static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3312 unassigned_mem_writeb,
e18231a3
BS
3313 unassigned_mem_writew,
3314 unassigned_mem_writel,
33417e70
FB
3315};
3316
c227f099 3317static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3318 uint32_t val)
9fa3e853 3319{
3a7d929e 3320 int dirty_flags;
f7c11b53 3321 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3322 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3323#if !defined(CONFIG_USER_ONLY)
3a7d929e 3324 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3325 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3326#endif
3a7d929e 3327 }
5579c7f3 3328 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3329 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3330 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3331 /* we remove the notdirty callback only if the code has been
3332 flushed */
3333 if (dirty_flags == 0xff)
2e70f6ef 3334 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3335}
3336
c227f099 3337static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3338 uint32_t val)
9fa3e853 3339{
3a7d929e 3340 int dirty_flags;
f7c11b53 3341 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3342 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3343#if !defined(CONFIG_USER_ONLY)
3a7d929e 3344 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3345 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3346#endif
3a7d929e 3347 }
5579c7f3 3348 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3349 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3350 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3351 /* we remove the notdirty callback only if the code has been
3352 flushed */
3353 if (dirty_flags == 0xff)
2e70f6ef 3354 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3355}
3356
c227f099 3357static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3358 uint32_t val)
9fa3e853 3359{
3a7d929e 3360 int dirty_flags;
f7c11b53 3361 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3362 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3363#if !defined(CONFIG_USER_ONLY)
3a7d929e 3364 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3365 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3366#endif
3a7d929e 3367 }
5579c7f3 3368 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3369 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3370 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3371 /* we remove the notdirty callback only if the code has been
3372 flushed */
3373 if (dirty_flags == 0xff)
2e70f6ef 3374 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3375}
3376
d60efc6b 3377static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3378 NULL, /* never used */
3379 NULL, /* never used */
3380 NULL, /* never used */
3381};
3382
d60efc6b 3383static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3384 notdirty_mem_writeb,
3385 notdirty_mem_writew,
3386 notdirty_mem_writel,
3387};
3388
0f459d16 3389/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3390static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3391{
3392 CPUState *env = cpu_single_env;
06d55cc1
AL
3393 target_ulong pc, cs_base;
3394 TranslationBlock *tb;
0f459d16 3395 target_ulong vaddr;
a1d1bb31 3396 CPUWatchpoint *wp;
06d55cc1 3397 int cpu_flags;
0f459d16 3398
06d55cc1
AL
3399 if (env->watchpoint_hit) {
3400 /* We re-entered the check after replacing the TB. Now raise
3401 * the debug interrupt so that is will trigger after the
3402 * current instruction. */
3403 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3404 return;
3405 }
2e70f6ef 3406 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3407 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3408 if ((vaddr == (wp->vaddr & len_mask) ||
3409 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3410 wp->flags |= BP_WATCHPOINT_HIT;
3411 if (!env->watchpoint_hit) {
3412 env->watchpoint_hit = wp;
3413 tb = tb_find_pc(env->mem_io_pc);
3414 if (!tb) {
3415 cpu_abort(env, "check_watchpoint: could not find TB for "
3416 "pc=%p", (void *)env->mem_io_pc);
3417 }
618ba8e6 3418 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3419 tb_phys_invalidate(tb, -1);
3420 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3421 env->exception_index = EXCP_DEBUG;
3422 } else {
3423 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3424 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3425 }
3426 cpu_resume_from_signal(env, NULL);
06d55cc1 3427 }
6e140f28
AL
3428 } else {
3429 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3430 }
3431 }
3432}
3433
6658ffb8
PB
3434/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3435 so these check for a hit then pass through to the normal out-of-line
3436 phys routines. */
c227f099 3437static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3438{
b4051334 3439 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3440 return ldub_phys(addr);
3441}
3442
c227f099 3443static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3444{
b4051334 3445 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3446 return lduw_phys(addr);
3447}
3448
c227f099 3449static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3450{
b4051334 3451 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3452 return ldl_phys(addr);
3453}
3454
c227f099 3455static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3456 uint32_t val)
3457{
b4051334 3458 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3459 stb_phys(addr, val);
3460}
3461
c227f099 3462static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3463 uint32_t val)
3464{
b4051334 3465 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3466 stw_phys(addr, val);
3467}
3468
c227f099 3469static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3470 uint32_t val)
3471{
b4051334 3472 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3473 stl_phys(addr, val);
3474}
3475
d60efc6b 3476static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3477 watch_mem_readb,
3478 watch_mem_readw,
3479 watch_mem_readl,
3480};
3481
d60efc6b 3482static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3483 watch_mem_writeb,
3484 watch_mem_writew,
3485 watch_mem_writel,
3486};
6658ffb8 3487
f6405247
RH
3488static inline uint32_t subpage_readlen (subpage_t *mmio,
3489 target_phys_addr_t addr,
3490 unsigned int len)
db7b5426 3491{
f6405247 3492 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3493#if defined(DEBUG_SUBPAGE)
3494 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3495 mmio, len, addr, idx);
3496#endif
db7b5426 3497
f6405247
RH
3498 addr += mmio->region_offset[idx];
3499 idx = mmio->sub_io_index[idx];
3500 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3501}
3502
c227f099 3503static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3504 uint32_t value, unsigned int len)
db7b5426 3505{
f6405247 3506 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3507#if defined(DEBUG_SUBPAGE)
f6405247
RH
3508 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3509 __func__, mmio, len, addr, idx, value);
db7b5426 3510#endif
f6405247
RH
3511
3512 addr += mmio->region_offset[idx];
3513 idx = mmio->sub_io_index[idx];
3514 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3515}
3516
c227f099 3517static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3518{
db7b5426
BS
3519 return subpage_readlen(opaque, addr, 0);
3520}
3521
c227f099 3522static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3523 uint32_t value)
3524{
db7b5426
BS
3525 subpage_writelen(opaque, addr, value, 0);
3526}
3527
c227f099 3528static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3529{
db7b5426
BS
3530 return subpage_readlen(opaque, addr, 1);
3531}
3532
c227f099 3533static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3534 uint32_t value)
3535{
db7b5426
BS
3536 subpage_writelen(opaque, addr, value, 1);
3537}
3538
c227f099 3539static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3540{
db7b5426
BS
3541 return subpage_readlen(opaque, addr, 2);
3542}
3543
f6405247
RH
3544static void subpage_writel (void *opaque, target_phys_addr_t addr,
3545 uint32_t value)
db7b5426 3546{
db7b5426
BS
3547 subpage_writelen(opaque, addr, value, 2);
3548}
3549
d60efc6b 3550static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3551 &subpage_readb,
3552 &subpage_readw,
3553 &subpage_readl,
3554};
3555
d60efc6b 3556static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3557 &subpage_writeb,
3558 &subpage_writew,
3559 &subpage_writel,
3560};
3561
c227f099
AL
3562static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3563 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3564{
3565 int idx, eidx;
3566
3567 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3568 return -1;
3569 idx = SUBPAGE_IDX(start);
3570 eidx = SUBPAGE_IDX(end);
3571#if defined(DEBUG_SUBPAGE)
0bf9e31a 3572 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3573 mmio, start, end, idx, eidx, memory);
3574#endif
95c318f5
GN
3575 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3576 memory = IO_MEM_UNASSIGNED;
f6405247 3577 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3578 for (; idx <= eidx; idx++) {
f6405247
RH
3579 mmio->sub_io_index[idx] = memory;
3580 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3581 }
3582
3583 return 0;
3584}
3585
f6405247
RH
3586static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3587 ram_addr_t orig_memory,
3588 ram_addr_t region_offset)
db7b5426 3589{
c227f099 3590 subpage_t *mmio;
db7b5426
BS
3591 int subpage_memory;
3592
c227f099 3593 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3594
3595 mmio->base = base;
2507c12a
AG
3596 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3597 DEVICE_NATIVE_ENDIAN);
db7b5426 3598#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3599 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3600 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3601#endif
1eec614b 3602 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3603 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3604
3605 return mmio;
3606}
3607
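/* Illustrative sketch (hypothetical I/O tokens, assuming 4 KiB target
   pages): carving one page into two ranges backed by different handlers,
   which is what the physical-memory registration code does internally when
   a mapping does not cover a whole page.  io_a/io_b would be tokens from
   cpu_register_io_memory(). */
static ram_addr_t example_split_page(target_phys_addr_t base,
                                     ram_addr_t io_a, ram_addr_t io_b)
{
    ram_addr_t phys;
    subpage_t *sp = subpage_init(base, &phys, IO_MEM_UNASSIGNED, 0);

    subpage_register(sp, 0x000, 0x7ff, io_a, 0);                /* first half */
    subpage_register(sp, 0x800, TARGET_PAGE_SIZE - 1, io_b, 0); /* second half */
    /* phys (with IO_MEM_SUBPAGE set) is what gets stored in the page's
       PhysPageDesc */
    return phys;
}
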
88715657
AL
3608static int get_free_io_mem_idx(void)
3609{
3610 int i;
3611
3612 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3613 if (!io_mem_used[i]) {
3614 io_mem_used[i] = 1;
3615 return i;
3616 }
c6703b47 3617 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3618 return -1;
3619}
3620
dd310534
AG
3621/*
3622 * Usually, devices operate in little endian mode. There are devices out
 3623 * there that operate in big endian too. Each device gets byte-swapped
 3624 * MMIO accesses if plugged onto a CPU that uses the other endianness.
3625 *
3626 * CPU Device swap?
3627 *
3628 * little little no
3629 * little big yes
3630 * big little yes
3631 * big big no
3632 */
3633
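/* A compact restatement of the table above (illustration only; nothing
   below uses it): a byte swap is needed exactly when the CPU and the
   device disagree on endianness. */
static inline int endianness_needs_bswap(int cpu_is_bigendian,
                                         enum device_endian e)
{
    return (e == DEVICE_BIG_ENDIAN && !cpu_is_bigendian) ||
           (e == DEVICE_LITTLE_ENDIAN && cpu_is_bigendian);
}
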
3634typedef struct SwapEndianContainer {
3635 CPUReadMemoryFunc *read[3];
3636 CPUWriteMemoryFunc *write[3];
3637 void *opaque;
3638} SwapEndianContainer;
3639
3640static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3641{
3642 uint32_t val;
3643 SwapEndianContainer *c = opaque;
3644 val = c->read[0](c->opaque, addr);
3645 return val;
3646}
3647
3648static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3649{
3650 uint32_t val;
3651 SwapEndianContainer *c = opaque;
3652 val = bswap16(c->read[1](c->opaque, addr));
3653 return val;
3654}
3655
3656static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3657{
3658 uint32_t val;
3659 SwapEndianContainer *c = opaque;
3660 val = bswap32(c->read[2](c->opaque, addr));
3661 return val;
3662}
3663
3664static CPUReadMemoryFunc * const swapendian_readfn[3]={
3665 swapendian_mem_readb,
3666 swapendian_mem_readw,
3667 swapendian_mem_readl
3668};
3669
3670static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3671 uint32_t val)
3672{
3673 SwapEndianContainer *c = opaque;
3674 c->write[0](c->opaque, addr, val);
3675}
3676
3677static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3678 uint32_t val)
3679{
3680 SwapEndianContainer *c = opaque;
3681 c->write[1](c->opaque, addr, bswap16(val));
3682}
3683
3684static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3685 uint32_t val)
3686{
3687 SwapEndianContainer *c = opaque;
3688 c->write[2](c->opaque, addr, bswap32(val));
3689}
3690
3691static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3692 swapendian_mem_writeb,
3693 swapendian_mem_writew,
3694 swapendian_mem_writel
3695};
3696
3697static void swapendian_init(int io_index)
3698{
3699 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3700 int i;
3701
3702 /* Swap mmio for big endian targets */
3703 c->opaque = io_mem_opaque[io_index];
3704 for (i = 0; i < 3; i++) {
3705 c->read[i] = io_mem_read[io_index][i];
3706 c->write[i] = io_mem_write[io_index][i];
3707
3708 io_mem_read[io_index][i] = swapendian_readfn[i];
3709 io_mem_write[io_index][i] = swapendian_writefn[i];
3710 }
3711 io_mem_opaque[io_index] = c;
3712}
3713
3714static void swapendian_del(int io_index)
3715{
3716 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3717 qemu_free(io_mem_opaque[io_index]);
3718 }
3719}
3720
33417e70
FB
3721/* mem_read and mem_write are arrays of functions containing the
3722 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3723 2). Functions can be omitted with a NULL function pointer.
3ee89922 3724 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3725 modified. If it is zero, a new io zone is allocated. The return
 3726 value can be used with cpu_register_physical_memory(). -1 is
 3727 returned on error. */
1eed09cb 3728static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3729 CPUReadMemoryFunc * const *mem_read,
3730 CPUWriteMemoryFunc * const *mem_write,
dd310534 3731 void *opaque, enum device_endian endian)
33417e70 3732{
3cab721d
RH
3733 int i;
3734
33417e70 3735 if (io_index <= 0) {
88715657
AL
3736 io_index = get_free_io_mem_idx();
3737 if (io_index == -1)
3738 return io_index;
33417e70 3739 } else {
1eed09cb 3740 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3741 if (io_index >= IO_MEM_NB_ENTRIES)
3742 return -1;
3743 }
b5ff1b31 3744
3cab721d
RH
3745 for (i = 0; i < 3; ++i) {
3746 io_mem_read[io_index][i]
3747 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3748 }
3749 for (i = 0; i < 3; ++i) {
3750 io_mem_write[io_index][i]
3751 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3752 }
a4193c8a 3753 io_mem_opaque[io_index] = opaque;
f6405247 3754
dd310534
AG
3755 switch (endian) {
3756 case DEVICE_BIG_ENDIAN:
3757#ifndef TARGET_WORDS_BIGENDIAN
3758 swapendian_init(io_index);
3759#endif
3760 break;
3761 case DEVICE_LITTLE_ENDIAN:
3762#ifdef TARGET_WORDS_BIGENDIAN
3763 swapendian_init(io_index);
3764#endif
3765 break;
3766 case DEVICE_NATIVE_ENDIAN:
3767 default:
3768 break;
3769 }
3770
f6405247 3771 return (io_index << IO_MEM_SHIFT);
33417e70 3772}
61382a50 3773
d60efc6b
BS
3774int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3775 CPUWriteMemoryFunc * const *mem_write,
dd310534 3776 void *opaque, enum device_endian endian)
1eed09cb 3777{
2507c12a 3778 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3779}
3780
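/* Usage sketch (hypothetical device): only the 32-bit callbacks are
   provided; the NULL byte/word slots fall back to the unassigned_mem_*
   handlers as described above.  The returned token is what
   cpu_register_physical_memory() expects, and cpu_unregister_io_memory()
   releases the slot again. */
static uint32_t mydev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;                       /* hypothetical register value */
}

static void mydev_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    /* latch val into (hypothetical) device state */
}

static CPUReadMemoryFunc * const mydev_read[3] = { NULL, NULL, mydev_readl };
static CPUWriteMemoryFunc * const mydev_write[3] = { NULL, NULL, mydev_writel };

static int example_register_mydev(void)
{
    /* a little endian device: accesses are byte swapped on big endian
       targets via the swapendian machinery above */
    return cpu_register_io_memory(mydev_read, mydev_write, NULL,
                                  DEVICE_LITTLE_ENDIAN);
}
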
88715657
AL
3781void cpu_unregister_io_memory(int io_table_address)
3782{
3783 int i;
3784 int io_index = io_table_address >> IO_MEM_SHIFT;
3785
dd310534
AG
3786 swapendian_del(io_index);
3787
88715657
AL
3788 for (i=0;i < 3; i++) {
3789 io_mem_read[io_index][i] = unassigned_mem_read[i];
3790 io_mem_write[io_index][i] = unassigned_mem_write[i];
3791 }
3792 io_mem_opaque[io_index] = NULL;
3793 io_mem_used[io_index] = 0;
3794}
3795
e9179ce1
AK
3796static void io_mem_init(void)
3797{
3798 int i;
3799
2507c12a
AG
3800 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3801 unassigned_mem_write, NULL,
3802 DEVICE_NATIVE_ENDIAN);
3803 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3804 unassigned_mem_write, NULL,
3805 DEVICE_NATIVE_ENDIAN);
3806 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3807 notdirty_mem_write, NULL,
3808 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3809 for (i=0; i<5; i++)
3810 io_mem_used[i] = 1;
3811
3812 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3813 watch_mem_write, NULL,
3814 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3815}
3816
62152b8a
AK
3817static void memory_map_init(void)
3818{
3819 system_memory = qemu_malloc(sizeof(*system_memory));
3820 memory_region_init(system_memory, "system", UINT64_MAX);
3821 set_system_memory_map(system_memory);
3822}
3823
3824MemoryRegion *get_system_memory(void)
3825{
3826 return system_memory;
3827}
3828
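/* Sketch (hypothetical offset and region): boards nest their RAM and
   device regions into the root region returned above, e.g. */
static void example_map_device_region(MemoryRegion *dev_mr)
{
    memory_region_add_subregion(get_system_memory(), 0x10000000, dev_mr);
}
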
e2eef170
PB
3829#endif /* !defined(CONFIG_USER_ONLY) */
3830
13eb76e0
FB
3831/* physical memory access (slow version, mainly for debug) */
3832#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3833int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3834 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3835{
3836 int l, flags;
3837 target_ulong page;
53a5960a 3838 void * p;
13eb76e0
FB
3839
3840 while (len > 0) {
3841 page = addr & TARGET_PAGE_MASK;
3842 l = (page + TARGET_PAGE_SIZE) - addr;
3843 if (l > len)
3844 l = len;
3845 flags = page_get_flags(page);
3846 if (!(flags & PAGE_VALID))
a68fe89c 3847 return -1;
13eb76e0
FB
3848 if (is_write) {
3849 if (!(flags & PAGE_WRITE))
a68fe89c 3850 return -1;
579a97f7 3851 /* XXX: this code should not depend on lock_user */
72fb7daa 3852 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3853 return -1;
72fb7daa
AJ
3854 memcpy(p, buf, l);
3855 unlock_user(p, addr, l);
13eb76e0
FB
3856 } else {
3857 if (!(flags & PAGE_READ))
a68fe89c 3858 return -1;
579a97f7 3859 /* XXX: this code should not depend on lock_user */
72fb7daa 3860 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3861 return -1;
72fb7daa 3862 memcpy(buf, p, l);
5b257578 3863 unlock_user(p, addr, 0);
13eb76e0
FB
3864 }
3865 len -= l;
3866 buf += l;
3867 addr += l;
3868 }
a68fe89c 3869 return 0;
13eb76e0 3870}
8df1cd07 3871
13eb76e0 3872#else
c227f099 3873void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3874 int len, int is_write)
3875{
3876 int l, io_index;
3877 uint8_t *ptr;
3878 uint32_t val;
c227f099 3879 target_phys_addr_t page;
2e12669a 3880 unsigned long pd;
92e873b9 3881 PhysPageDesc *p;
3b46e624 3882
13eb76e0
FB
3883 while (len > 0) {
3884 page = addr & TARGET_PAGE_MASK;
3885 l = (page + TARGET_PAGE_SIZE) - addr;
3886 if (l > len)
3887 l = len;
92e873b9 3888 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3889 if (!p) {
3890 pd = IO_MEM_UNASSIGNED;
3891 } else {
3892 pd = p->phys_offset;
3893 }
3b46e624 3894
13eb76e0 3895 if (is_write) {
3a7d929e 3896 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3897 target_phys_addr_t addr1 = addr;
13eb76e0 3898 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3899 if (p)
6c2934db 3900 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3901 /* XXX: could force cpu_single_env to NULL to avoid
3902 potential bugs */
6c2934db 3903 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3904 /* 32 bit write access */
c27004ec 3905 val = ldl_p(buf);
6c2934db 3906 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3907 l = 4;
6c2934db 3908 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3909 /* 16 bit write access */
c27004ec 3910 val = lduw_p(buf);
6c2934db 3911 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3912 l = 2;
3913 } else {
1c213d19 3914 /* 8 bit write access */
c27004ec 3915 val = ldub_p(buf);
6c2934db 3916 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3917 l = 1;
3918 }
3919 } else {
b448f2f3
FB
3920 unsigned long addr1;
3921 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3922 /* RAM case */
5579c7f3 3923 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3924 memcpy(ptr, buf, l);
3a7d929e
FB
3925 if (!cpu_physical_memory_is_dirty(addr1)) {
3926 /* invalidate code */
3927 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3928 /* set dirty bit */
f7c11b53
YT
3929 cpu_physical_memory_set_dirty_flags(
3930 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3931 }
050a0ddf 3932 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3933 }
3934 } else {
5fafdf24 3935 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3936 !(pd & IO_MEM_ROMD)) {
c227f099 3937 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3938 /* I/O case */
3939 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3940 if (p)
6c2934db
AJ
3941 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3942 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3943 /* 32 bit read access */
6c2934db 3944 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3945 stl_p(buf, val);
13eb76e0 3946 l = 4;
6c2934db 3947 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3948 /* 16 bit read access */
6c2934db 3949 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3950 stw_p(buf, val);
13eb76e0
FB
3951 l = 2;
3952 } else {
1c213d19 3953 /* 8 bit read access */
6c2934db 3954 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3955 stb_p(buf, val);
13eb76e0
FB
3956 l = 1;
3957 }
3958 } else {
3959 /* RAM case */
050a0ddf
AP
3960 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3961 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3962 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3963 }
3964 }
3965 len -= l;
3966 buf += l;
3967 addr += l;
3968 }
3969}
8df1cd07 3970
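/* Usage sketch (hypothetical address and payload): the convenience
   wrappers cpu_physical_memory_read()/cpu_physical_memory_write() are thin
   shells around cpu_physical_memory_rw() above. */
static void example_patch_guest_ram(void)
{
    uint8_t buf[4] = { 0xde, 0xad, 0xbe, 0xef };
    uint8_t check[4];

    cpu_physical_memory_write(0x100000, buf, sizeof(buf));
    cpu_physical_memory_read(0x100000, check, sizeof(check));
    /* if 0x100000 is RAM the buffers now match; if it maps to MMIO, the
       device callbacks were invoked instead */
}
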
d0ecd2aa 3971/* used for ROM loading: can write in RAM and ROM */
c227f099 3972void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3973 const uint8_t *buf, int len)
3974{
3975 int l;
3976 uint8_t *ptr;
c227f099 3977 target_phys_addr_t page;
d0ecd2aa
FB
3978 unsigned long pd;
3979 PhysPageDesc *p;
3b46e624 3980
d0ecd2aa
FB
3981 while (len > 0) {
3982 page = addr & TARGET_PAGE_MASK;
3983 l = (page + TARGET_PAGE_SIZE) - addr;
3984 if (l > len)
3985 l = len;
3986 p = phys_page_find(page >> TARGET_PAGE_BITS);
3987 if (!p) {
3988 pd = IO_MEM_UNASSIGNED;
3989 } else {
3990 pd = p->phys_offset;
3991 }
3b46e624 3992
d0ecd2aa 3993 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3994 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3995 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3996 /* do nothing */
3997 } else {
3998 unsigned long addr1;
3999 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4000 /* ROM/RAM case */
5579c7f3 4001 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 4002 memcpy(ptr, buf, l);
050a0ddf 4003 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
4004 }
4005 len -= l;
4006 buf += l;
4007 addr += l;
4008 }
4009}
4010
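/* Sketch (hypothetical address): this is the call a ROM loader makes; a
   plain cpu_physical_memory_write() would be discarded for pages that were
   registered as IO_MEM_ROM. */
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(0xfffc0000, blob, blob_len);
}
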
6d16c2f8
AL
4011typedef struct {
4012 void *buffer;
c227f099
AL
4013 target_phys_addr_t addr;
4014 target_phys_addr_t len;
6d16c2f8
AL
4015} BounceBuffer;
4016
4017static BounceBuffer bounce;
4018
ba223c29
AL
4019typedef struct MapClient {
4020 void *opaque;
4021 void (*callback)(void *opaque);
72cf2d4f 4022 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4023} MapClient;
4024
72cf2d4f
BS
4025static QLIST_HEAD(map_client_list, MapClient) map_client_list
4026 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4027
4028void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4029{
4030 MapClient *client = qemu_malloc(sizeof(*client));
4031
4032 client->opaque = opaque;
4033 client->callback = callback;
72cf2d4f 4034 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4035 return client;
4036}
4037
4038void cpu_unregister_map_client(void *_client)
4039{
4040 MapClient *client = (MapClient *)_client;
4041
72cf2d4f 4042 QLIST_REMOVE(client, link);
34d5e948 4043 qemu_free(client);
ba223c29
AL
4044}
4045
4046static void cpu_notify_map_clients(void)
4047{
4048 MapClient *client;
4049
72cf2d4f
BS
4050 while (!QLIST_EMPTY(&map_client_list)) {
4051 client = QLIST_FIRST(&map_client_list);
ba223c29 4052 client->callback(client->opaque);
34d5e948 4053 cpu_unregister_map_client(client);
ba223c29
AL
4054 }
4055}
4056
6d16c2f8
AL
4057/* Map a physical memory region into a host virtual address.
4058 * May map a subset of the requested range, given by and returned in *plen.
4059 * May return NULL if resources needed to perform the mapping are exhausted.
4060 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4061 * Use cpu_register_map_client() to know when retrying the map operation is
4062 * likely to succeed.
6d16c2f8 4063 */
c227f099
AL
4064void *cpu_physical_memory_map(target_phys_addr_t addr,
4065 target_phys_addr_t *plen,
6d16c2f8
AL
4066 int is_write)
4067{
c227f099 4068 target_phys_addr_t len = *plen;
38bee5dc 4069 target_phys_addr_t todo = 0;
6d16c2f8 4070 int l;
c227f099 4071 target_phys_addr_t page;
6d16c2f8
AL
4072 unsigned long pd;
4073 PhysPageDesc *p;
8ab934f9
SS
4074 ram_addr_t raddr = ULONG_MAX;
4075 ram_addr_t rlen;
4076 void *ret;
6d16c2f8
AL
4077
4078 while (len > 0) {
4079 page = addr & TARGET_PAGE_MASK;
4080 l = (page + TARGET_PAGE_SIZE) - addr;
4081 if (l > len)
4082 l = len;
4083 p = phys_page_find(page >> TARGET_PAGE_BITS);
4084 if (!p) {
4085 pd = IO_MEM_UNASSIGNED;
4086 } else {
4087 pd = p->phys_offset;
4088 }
4089
4090 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4091 if (todo || bounce.buffer) {
6d16c2f8
AL
4092 break;
4093 }
4094 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4095 bounce.addr = addr;
4096 bounce.len = l;
4097 if (!is_write) {
54f7b4a3 4098 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4099 }
38bee5dc
SS
4100
4101 *plen = l;
4102 return bounce.buffer;
6d16c2f8 4103 }
8ab934f9
SS
4104 if (!todo) {
4105 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4106 }
6d16c2f8
AL
4107
4108 len -= l;
4109 addr += l;
38bee5dc 4110 todo += l;
6d16c2f8 4111 }
8ab934f9
SS
4112 rlen = todo;
4113 ret = qemu_ram_ptr_length(raddr, &rlen);
4114 *plen = rlen;
4115 return ret;
6d16c2f8
AL
4116}
4117
4118/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4119 * Will also mark the memory as dirty if is_write == 1. access_len gives
4120 * the amount of memory that was actually read or written by the caller.
4121 */
c227f099
AL
4122void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4123 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4124{
4125 if (buffer != bounce.buffer) {
4126 if (is_write) {
e890261f 4127 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4128 while (access_len) {
4129 unsigned l;
4130 l = TARGET_PAGE_SIZE;
4131 if (l > access_len)
4132 l = access_len;
4133 if (!cpu_physical_memory_is_dirty(addr1)) {
4134 /* invalidate code */
4135 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4136 /* set dirty bit */
f7c11b53
YT
4137 cpu_physical_memory_set_dirty_flags(
4138 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4139 }
4140 addr1 += l;
4141 access_len -= l;
4142 }
4143 }
868bb33f 4144 if (xen_enabled()) {
e41d7c69 4145 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4146 }
6d16c2f8
AL
4147 return;
4148 }
4149 if (is_write) {
4150 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4151 }
f8a83245 4152 qemu_vfree(bounce.buffer);
6d16c2f8 4153 bounce.buffer = NULL;
ba223c29 4154 cpu_notify_map_clients();
6d16c2f8 4155}
d0ecd2aa 4156
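/* Sketch of the intended map/unmap calling pattern (addresses and retry
   plumbing are illustrative).  A NULL return means bounce-buffer resources
   are exhausted; cpu_register_map_client() can then schedule a retry. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* out of resources: register a map client and retry later */
        return;
    }
    memcpy(host, data, plen);                        /* plen may be < len */
    cpu_physical_memory_unmap(host, plen, 1, plen);  /* marks pages dirty */
}
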
8df1cd07 4157/* warning: addr must be aligned */
1e78bcc1
AG
4158static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4159 enum device_endian endian)
8df1cd07
FB
4160{
4161 int io_index;
4162 uint8_t *ptr;
4163 uint32_t val;
4164 unsigned long pd;
4165 PhysPageDesc *p;
4166
4167 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4168 if (!p) {
4169 pd = IO_MEM_UNASSIGNED;
4170 } else {
4171 pd = p->phys_offset;
4172 }
3b46e624 4173
5fafdf24 4174 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4175 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4176 /* I/O case */
4177 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4178 if (p)
4179 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4180 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4181#if defined(TARGET_WORDS_BIGENDIAN)
4182 if (endian == DEVICE_LITTLE_ENDIAN) {
4183 val = bswap32(val);
4184 }
4185#else
4186 if (endian == DEVICE_BIG_ENDIAN) {
4187 val = bswap32(val);
4188 }
4189#endif
8df1cd07
FB
4190 } else {
4191 /* RAM case */
5579c7f3 4192 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4193 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4194 switch (endian) {
4195 case DEVICE_LITTLE_ENDIAN:
4196 val = ldl_le_p(ptr);
4197 break;
4198 case DEVICE_BIG_ENDIAN:
4199 val = ldl_be_p(ptr);
4200 break;
4201 default:
4202 val = ldl_p(ptr);
4203 break;
4204 }
8df1cd07
FB
4205 }
4206 return val;
4207}
4208
1e78bcc1
AG
4209uint32_t ldl_phys(target_phys_addr_t addr)
4210{
4211 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4212}
4213
4214uint32_t ldl_le_phys(target_phys_addr_t addr)
4215{
4216 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4217}
4218
4219uint32_t ldl_be_phys(target_phys_addr_t addr)
4220{
4221 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4222}
4223
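/* Usage note (hypothetical register offset): the _le/_be variants fix the
   device byte order regardless of the target, while the plain variant uses
   target byte order; in all cases addr must be 4-byte aligned. */
static uint32_t example_read_le_reg(target_phys_addr_t mmio_base)
{
    return ldl_le_phys(mmio_base + 0x10);
}
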
84b7b8e7 4224/* warning: addr must be aligned */
1e78bcc1
AG
4225static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4226 enum device_endian endian)
84b7b8e7
FB
4227{
4228 int io_index;
4229 uint8_t *ptr;
4230 uint64_t val;
4231 unsigned long pd;
4232 PhysPageDesc *p;
4233
4234 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4235 if (!p) {
4236 pd = IO_MEM_UNASSIGNED;
4237 } else {
4238 pd = p->phys_offset;
4239 }
3b46e624 4240
2a4188a3
FB
4241 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4242 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4243 /* I/O case */
4244 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4245 if (p)
4246 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4247
4248 /* XXX This is broken when device endian != cpu endian.
4249 Fix and add "endian" variable check */
84b7b8e7
FB
4250#ifdef TARGET_WORDS_BIGENDIAN
4251 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4252 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4253#else
4254 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4255 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4256#endif
4257 } else {
4258 /* RAM case */
5579c7f3 4259 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4260 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4261 switch (endian) {
4262 case DEVICE_LITTLE_ENDIAN:
4263 val = ldq_le_p(ptr);
4264 break;
4265 case DEVICE_BIG_ENDIAN:
4266 val = ldq_be_p(ptr);
4267 break;
4268 default:
4269 val = ldq_p(ptr);
4270 break;
4271 }
84b7b8e7
FB
4272 }
4273 return val;
4274}
4275
1e78bcc1
AG
4276uint64_t ldq_phys(target_phys_addr_t addr)
4277{
4278 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4279}
4280
4281uint64_t ldq_le_phys(target_phys_addr_t addr)
4282{
4283 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4284}
4285
4286uint64_t ldq_be_phys(target_phys_addr_t addr)
4287{
4288 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4289}
4290
aab33094 4291/* XXX: optimize */
c227f099 4292uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4293{
4294 uint8_t val;
4295 cpu_physical_memory_read(addr, &val, 1);
4296 return val;
4297}
4298
733f0b02 4299/* warning: addr must be aligned */
1e78bcc1
AG
4300static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4301 enum device_endian endian)
aab33094 4302{
733f0b02
MT
4303 int io_index;
4304 uint8_t *ptr;
4305 uint64_t val;
4306 unsigned long pd;
4307 PhysPageDesc *p;
4308
4309 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4310 if (!p) {
4311 pd = IO_MEM_UNASSIGNED;
4312 } else {
4313 pd = p->phys_offset;
4314 }
4315
4316 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4317 !(pd & IO_MEM_ROMD)) {
4318 /* I/O case */
4319 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4320 if (p)
4321 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4322 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4323#if defined(TARGET_WORDS_BIGENDIAN)
4324 if (endian == DEVICE_LITTLE_ENDIAN) {
4325 val = bswap16(val);
4326 }
4327#else
4328 if (endian == DEVICE_BIG_ENDIAN) {
4329 val = bswap16(val);
4330 }
4331#endif
733f0b02
MT
4332 } else {
4333 /* RAM case */
4334 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4335 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4336 switch (endian) {
4337 case DEVICE_LITTLE_ENDIAN:
4338 val = lduw_le_p(ptr);
4339 break;
4340 case DEVICE_BIG_ENDIAN:
4341 val = lduw_be_p(ptr);
4342 break;
4343 default:
4344 val = lduw_p(ptr);
4345 break;
4346 }
733f0b02
MT
4347 }
4348 return val;
aab33094
FB
4349}
4350
1e78bcc1
AG
4351uint32_t lduw_phys(target_phys_addr_t addr)
4352{
4353 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4354}
4355
4356uint32_t lduw_le_phys(target_phys_addr_t addr)
4357{
4358 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4359}
4360
4361uint32_t lduw_be_phys(target_phys_addr_t addr)
4362{
4363 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4364}
4365
8df1cd07
FB
4366/* warning: addr must be aligned. The ram page is not masked as dirty
4367 and the code inside is not invalidated. It is useful if the dirty
4368 bits are used to track modified PTEs */
c227f099 4369void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4370{
4371 int io_index;
4372 uint8_t *ptr;
4373 unsigned long pd;
4374 PhysPageDesc *p;
4375
4376 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4377 if (!p) {
4378 pd = IO_MEM_UNASSIGNED;
4379 } else {
4380 pd = p->phys_offset;
4381 }
3b46e624 4382
3a7d929e 4383 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4384 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4385 if (p)
4386 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4387 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4388 } else {
74576198 4389 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4390 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4391 stl_p(ptr, val);
74576198
AL
4392
4393 if (unlikely(in_migration)) {
4394 if (!cpu_physical_memory_is_dirty(addr1)) {
4395 /* invalidate code */
4396 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4397 /* set dirty bit */
f7c11b53
YT
4398 cpu_physical_memory_set_dirty_flags(
4399 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4400 }
4401 }
8df1cd07
FB
4402 }
4403}
4404
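/* Sketch of the use case named above (hypothetical PTE layout): a target
   MMU helper sets the accessed bit in a guest PTE with the _notdirty
   variant so that QEMU's dirty tracking of the page holding the PTE is
   left undisturbed. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);   /* 0x20: hypothetical A bit */
}
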
c227f099 4405void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4406{
4407 int io_index;
4408 uint8_t *ptr;
4409 unsigned long pd;
4410 PhysPageDesc *p;
4411
4412 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4413 if (!p) {
4414 pd = IO_MEM_UNASSIGNED;
4415 } else {
4416 pd = p->phys_offset;
4417 }
3b46e624 4418
bc98a7ef
JM
4419 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4420 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4421 if (p)
4422 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4423#ifdef TARGET_WORDS_BIGENDIAN
4424 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4425 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4426#else
4427 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4428 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4429#endif
4430 } else {
5579c7f3 4431 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4432 (addr & ~TARGET_PAGE_MASK);
4433 stq_p(ptr, val);
4434 }
4435}
4436
8df1cd07 4437/* warning: addr must be aligned */
1e78bcc1
AG
4438static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4439 enum device_endian endian)
8df1cd07
FB
4440{
4441 int io_index;
4442 uint8_t *ptr;
4443 unsigned long pd;
4444 PhysPageDesc *p;
4445
4446 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4447 if (!p) {
4448 pd = IO_MEM_UNASSIGNED;
4449 } else {
4450 pd = p->phys_offset;
4451 }
3b46e624 4452
3a7d929e 4453 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4454 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4455 if (p)
4456 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4457#if defined(TARGET_WORDS_BIGENDIAN)
4458 if (endian == DEVICE_LITTLE_ENDIAN) {
4459 val = bswap32(val);
4460 }
4461#else
4462 if (endian == DEVICE_BIG_ENDIAN) {
4463 val = bswap32(val);
4464 }
4465#endif
8df1cd07
FB
4466 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4467 } else {
4468 unsigned long addr1;
4469 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4470 /* RAM case */
5579c7f3 4471 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4472 switch (endian) {
4473 case DEVICE_LITTLE_ENDIAN:
4474 stl_le_p(ptr, val);
4475 break;
4476 case DEVICE_BIG_ENDIAN:
4477 stl_be_p(ptr, val);
4478 break;
4479 default:
4480 stl_p(ptr, val);
4481 break;
4482 }
3a7d929e
FB
4483 if (!cpu_physical_memory_is_dirty(addr1)) {
4484 /* invalidate code */
4485 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4486 /* set dirty bit */
f7c11b53
YT
4487 cpu_physical_memory_set_dirty_flags(addr1,
4488 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4489 }
8df1cd07
FB
4490 }
4491}
4492
1e78bcc1
AG
4493void stl_phys(target_phys_addr_t addr, uint32_t val)
4494{
4495 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4496}
4497
4498void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4499{
4500 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4501}
4502
4503void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4504{
4505 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4506}
4507
aab33094 4508/* XXX: optimize */
c227f099 4509void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4510{
4511 uint8_t v = val;
4512 cpu_physical_memory_write(addr, &v, 1);
4513}
4514
733f0b02 4515/* warning: addr must be aligned */
1e78bcc1
AG
4516static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4517 enum device_endian endian)
aab33094 4518{
733f0b02
MT
4519 int io_index;
4520 uint8_t *ptr;
4521 unsigned long pd;
4522 PhysPageDesc *p;
4523
4524 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4525 if (!p) {
4526 pd = IO_MEM_UNASSIGNED;
4527 } else {
4528 pd = p->phys_offset;
4529 }
4530
4531 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4532 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4533 if (p)
4534 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4535#if defined(TARGET_WORDS_BIGENDIAN)
4536 if (endian == DEVICE_LITTLE_ENDIAN) {
4537 val = bswap16(val);
4538 }
4539#else
4540 if (endian == DEVICE_BIG_ENDIAN) {
4541 val = bswap16(val);
4542 }
4543#endif
733f0b02
MT
4544 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4545 } else {
4546 unsigned long addr1;
4547 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4548 /* RAM case */
4549 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4550 switch (endian) {
4551 case DEVICE_LITTLE_ENDIAN:
4552 stw_le_p(ptr, val);
4553 break;
4554 case DEVICE_BIG_ENDIAN:
4555 stw_be_p(ptr, val);
4556 break;
4557 default:
4558 stw_p(ptr, val);
4559 break;
4560 }
733f0b02
MT
4561 if (!cpu_physical_memory_is_dirty(addr1)) {
4562 /* invalidate code */
4563 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4564 /* set dirty bit */
4565 cpu_physical_memory_set_dirty_flags(addr1,
4566 (0xff & ~CODE_DIRTY_FLAG));
4567 }
4568 }
aab33094
FB
4569}
4570
1e78bcc1
AG
4571void stw_phys(target_phys_addr_t addr, uint32_t val)
4572{
4573 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4574}
4575
4576void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4577{
4578 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4579}
4580
4581void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4582{
4583 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4584}
4585
aab33094 4586/* XXX: optimize */
c227f099 4587void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4588{
4589 val = tswap64(val);
71d2b725 4590 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4591}
4592
1e78bcc1
AG
4593void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4594{
4595 val = cpu_to_le64(val);
4596 cpu_physical_memory_write(addr, &val, 8);
4597}
4598
4599void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4600{
4601 val = cpu_to_be64(val);
4602 cpu_physical_memory_write(addr, &val, 8);
4603}
4604
5e2972fd 4605/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4606int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4607 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4608{
4609 int l;
c227f099 4610 target_phys_addr_t phys_addr;
9b3c35e0 4611 target_ulong page;
13eb76e0
FB
4612
4613 while (len > 0) {
4614 page = addr & TARGET_PAGE_MASK;
4615 phys_addr = cpu_get_phys_page_debug(env, page);
4616 /* if no physical page mapped, return an error */
4617 if (phys_addr == -1)
4618 return -1;
4619 l = (page + TARGET_PAGE_SIZE) - addr;
4620 if (l > len)
4621 l = len;
5e2972fd 4622 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4623 if (is_write)
4624 cpu_physical_memory_write_rom(phys_addr, buf, l);
4625 else
5e2972fd 4626 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4627 len -= l;
4628 buf += l;
4629 addr += l;
4630 }
4631 return 0;
4632}
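
/* Debug-read sketch (hypothetical virtual address): gdbstub-style
   consumers read guest virtual memory this way.  The buffer is filled in
   target byte order, hence the ldl_p() to extract a value. */
static uint32_t example_debug_ldl(CPUState *env, target_ulong va)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, va, buf, sizeof(buf), 0) < 0) {
        return 0;   /* no physical page mapped at va */
    }
    return ldl_p(buf);
}
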
a68fe89c 4633#endif
13eb76e0 4634
2e70f6ef
PB
4635/* in deterministic execution mode, instructions doing device I/Os
4636 must be at the end of the TB */
4637void cpu_io_recompile(CPUState *env, void *retaddr)
4638{
4639 TranslationBlock *tb;
4640 uint32_t n, cflags;
4641 target_ulong pc, cs_base;
4642 uint64_t flags;
4643
4644 tb = tb_find_pc((unsigned long)retaddr);
4645 if (!tb) {
4646 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4647 retaddr);
4648 }
4649 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4650 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4651 /* Calculate how many instructions had been executed before the fault
bf20dc07 4652 occurred. */
2e70f6ef
PB
4653 n = n - env->icount_decr.u16.low;
4654 /* Generate a new TB ending on the I/O insn. */
4655 n++;
4656 /* On MIPS and SH, delay slot instructions can only be restarted if
4657 they were already the first instruction in the TB. If this is not
bf20dc07 4658 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4659 branch. */
4660#if defined(TARGET_MIPS)
4661 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4662 env->active_tc.PC -= 4;
4663 env->icount_decr.u16.low++;
4664 env->hflags &= ~MIPS_HFLAG_BMASK;
4665 }
4666#elif defined(TARGET_SH4)
4667 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4668 && n > 1) {
4669 env->pc -= 2;
4670 env->icount_decr.u16.low++;
4671 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4672 }
4673#endif
4674 /* This should never happen. */
4675 if (n > CF_COUNT_MASK)
4676 cpu_abort(env, "TB too big during recompile");
4677
4678 cflags = n | CF_LAST_IO;
4679 pc = tb->pc;
4680 cs_base = tb->cs_base;
4681 flags = tb->flags;
4682 tb_phys_invalidate(tb, -1);
4683 /* FIXME: In theory this could raise an exception. In practice
4684 we have already translated the block once so it's probably ok. */
4685 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4686 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4687 the first in the TB) then we end up generating a whole new TB and
4688 repeating the fault, which is horribly inefficient.
4689 Better would be to execute just this insn uncached, or generate a
4690 second new TB. */
4691 cpu_resume_from_signal(env, NULL);
4692}
4693
b3755a91
PB
4694#if !defined(CONFIG_USER_ONLY)
4695
055403b2 4696void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4697{
4698 int i, target_code_size, max_target_code_size;
4699 int direct_jmp_count, direct_jmp2_count, cross_page;
4700 TranslationBlock *tb;
3b46e624 4701
e3db7226
FB
4702 target_code_size = 0;
4703 max_target_code_size = 0;
4704 cross_page = 0;
4705 direct_jmp_count = 0;
4706 direct_jmp2_count = 0;
4707 for(i = 0; i < nb_tbs; i++) {
4708 tb = &tbs[i];
4709 target_code_size += tb->size;
4710 if (tb->size > max_target_code_size)
4711 max_target_code_size = tb->size;
4712 if (tb->page_addr[1] != -1)
4713 cross_page++;
4714 if (tb->tb_next_offset[0] != 0xffff) {
4715 direct_jmp_count++;
4716 if (tb->tb_next_offset[1] != 0xffff) {
4717 direct_jmp2_count++;
4718 }
4719 }
4720 }
4721 /* XXX: avoid using doubles ? */
57fec1fe 4722 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4723 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4724 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4725 cpu_fprintf(f, "TB count %d/%d\n",
4726 nb_tbs, code_gen_max_blocks);
5fafdf24 4727 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4728 nb_tbs ? target_code_size / nb_tbs : 0,
4729 max_target_code_size);
055403b2 4730 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4731 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4732 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4733 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4734 cross_page,
e3db7226
FB
4735 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4736 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4737 direct_jmp_count,
e3db7226
FB
4738 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4739 direct_jmp2_count,
4740 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4741 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4742 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4743 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4744 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4745 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4746}
4747
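/* Sketch: the monitor's "info jit" command is the usual caller of the
   function above; any (FILE *, fprintf)-style pair works, e.g. dumping
   to stderr. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
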
61382a50
FB
4748#define MMUSUFFIX _cmmu
4749#define GETPC() NULL
4750#define env cpu_single_env
b769d8fe 4751#define SOFTMMU_CODE_ACCESS
61382a50
FB
4752
4753#define SHIFT 0
4754#include "softmmu_template.h"
4755
4756#define SHIFT 1
4757#include "softmmu_template.h"
4758
4759#define SHIFT 2
4760#include "softmmu_template.h"
4761
4762#define SHIFT 3
4763#include "softmmu_template.h"
4764
4765#undef env
4766
4767#endif