/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

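/* Return the PageDesc for the given page index in the multi-level
   l1_map, allocating the intermediate tables and the leaf level on
   demand when 'alloc' is non-zero.  Returns NULL if the entry does
   not exist and 'alloc' is zero. */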
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

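/* Allocate the buffer that translated code is generated into: either the
   static buffer or an executable mapping whose placement honours the
   branch-range constraints of the host architecture.  Also sizes the TB
   array that indexes the buffer. */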
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

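/* Append a newly created CPU to the global CPU list, assign it the next
   free cpu_index and register its common save/load state. */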
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

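/* Remove a TB from the physical hash table, unlink it from the pages it
   spans and from the per-CPU tb_jmp_cache, and break every jump that
   still targets it. */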
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

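/* Translate the block starting at 'pc' into the code buffer, flushing
   the buffer first if no TB slot is left, and link the new TB into the
   physical page tables (a TB may span two guest pages). */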
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

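/* If jump slot 'n' of 'tb' is chained, remove 'tb' from the circular
   jmp_first list of the destination TB, reset the patched jump in the
   generated code, and recurse into the destination. */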
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

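/* Unchain the TB the CPU is currently executing (and, recursively, the
   TBs it is linked to) so that control returns to the main loop. */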
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

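/* Notify every registered CPUPhysMemoryClient that the mapping of a
   physical address range has changed. */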
1743static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1744 ram_addr_t size,
0fd542fb
MT
1745 ram_addr_t phys_offset,
1746 bool log_dirty)
f6f3fbca
MT
1747{
1748 CPUPhysMemoryClient *client;
1749 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1750 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1751 }
1752}
1753
1754static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1755 target_phys_addr_t end)
f6f3fbca
MT
1756{
1757 CPUPhysMemoryClient *client;
1758 QLIST_FOREACH(client, &memory_client_list, list) {
1759 int r = client->sync_dirty_bitmap(client, start, end);
1760 if (r < 0)
1761 return r;
1762 }
1763 return 0;
1764}
1765
1766static int cpu_notify_migration_log(int enable)
1767{
1768 CPUPhysMemoryClient *client;
1769 QLIST_FOREACH(client, &memory_client_list, list) {
1770 int r = client->migration_log(client, enable);
1771 if (r < 0)
1772 return r;
1773 }
1774 return 0;
1775}
1776
2173a75f
AW
1777struct last_map {
1778 target_phys_addr_t start_addr;
1779 ram_addr_t size;
1780 ram_addr_t phys_offset;
1781};
1782
8d4c78e7
AW
1783/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1784 * address. Each intermediate table provides the next L2_BITs of guest
1785 * physical address space. The number of levels varies based on host and
1786 * guest configuration, making it efficient to build the final guest
1787 * physical address by seeding the L1 offset and shifting and adding in
1788 * each L2 offset as we recurse through them. */
2173a75f
AW
1789static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1790 void **lp, target_phys_addr_t addr,
1791 struct last_map *map)
f6f3fbca 1792{
5cd2c5b6 1793 int i;
f6f3fbca 1794
5cd2c5b6
RH
1795 if (*lp == NULL) {
1796 return;
1797 }
1798 if (level == 0) {
1799 PhysPageDesc *pd = *lp;
8d4c78e7 1800 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1801 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1802 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1803 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1804
1805 if (map->size &&
1806 start_addr == map->start_addr + map->size &&
1807 pd[i].phys_offset == map->phys_offset + map->size) {
1808
1809 map->size += TARGET_PAGE_SIZE;
1810 continue;
1811 } else if (map->size) {
1812 client->set_memory(client, map->start_addr,
1813 map->size, map->phys_offset, false);
1814 }
1815
1816 map->start_addr = start_addr;
1817 map->size = TARGET_PAGE_SIZE;
1818 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1819 }
5cd2c5b6
RH
1820 }
1821 } else {
1822 void **pp = *lp;
7296abac 1823 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1824 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1825 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1826 }
1827 }
1828}
1829
1830static void phys_page_for_each(CPUPhysMemoryClient *client)
1831{
5cd2c5b6 1832 int i;
2173a75f
AW
1833 struct last_map map = { };
1834
5cd2c5b6
RH
1835 for (i = 0; i < P_L1_SIZE; ++i) {
1836 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1837 l1_phys_map + i, i, &map);
1838 }
1839 if (map.size) {
1840 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1841 false);
f6f3fbca 1842 }
f6f3fbca
MT
1843}
1844
1845void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1846{
1847 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1848 phys_page_for_each(client);
1849}
1850
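/* Illustrative sketch (not part of the original file): a minimal
 * CPUPhysMemoryClient.  Thanks to the struct last_map coalescing in
 * phys_page_for_each_1(), set_memory is invoked once per maximal run of
 * pages that are contiguous in both guest physical address and phys_offset,
 * rather than once per TARGET_PAGE_SIZE page.  The callback signatures
 * mirror how they are invoked above; the bodies are placeholders. */
static void example_client_set_memory(CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size, ram_addr_t phys_offset,
                                      bool log_dirty)
{
    /* e.g. mirror the mapping into an external MMU/IOMMU model */
}

static int example_client_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;
}

static int example_client_migration_log(CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;
}

static CPUPhysMemoryClient example_memory_client = {
    .set_memory = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log = example_client_migration_log,
};

/* Registering the client replays every existing mapping through set_memory:
 *     cpu_register_phys_memory_client(&example_memory_client);
 */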
1851void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1852{
1853 QLIST_REMOVE(client, list);
1854}
1855#endif
1856
f193c797
FB
1857static int cmp1(const char *s1, int n, const char *s2)
1858{
1859 if (strlen(s2) != n)
1860 return 0;
1861 return memcmp(s1, s2, n) == 0;
1862}
3b46e624 1863
f193c797
FB
1864/* takes a comma separated list of log masks. Return 0 if error. */
1865int cpu_str_to_log_mask(const char *str)
1866{
c7cd6a37 1867 const CPULogItem *item;
f193c797
FB
1868 int mask;
1869 const char *p, *p1;
1870
1871 p = str;
1872 mask = 0;
1873 for(;;) {
1874 p1 = strchr(p, ',');
1875 if (!p1)
1876 p1 = p + strlen(p);
9742bf26
YT
1877 if(cmp1(p,p1-p,"all")) {
1878 for(item = cpu_log_items; item->mask != 0; item++) {
1879 mask |= item->mask;
1880 }
1881 } else {
1882 for(item = cpu_log_items; item->mask != 0; item++) {
1883 if (cmp1(p, p1 - p, item->name))
1884 goto found;
1885 }
1886 return 0;
f193c797 1887 }
f193c797
FB
1888 found:
1889 mask |= item->mask;
1890 if (*p1 != ',')
1891 break;
1892 p = p1 + 1;
1893 }
1894 return mask;
1895}
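/* Illustrative sketch (not part of the original file): typical use of the
 * parser above, as done for the "-d" command line option.  "in_asm,cpu" is
 * just an example; any comma-separated subset of cpu_log_items (or "all")
 * is accepted, and 0 is returned for an unrecognised item. */
static void example_enable_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,cpu");

    if (!mask) {
        fprintf(stderr, "unknown -d log item\n");
        return;
    }
    cpu_set_log(mask);
}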
ea041c0e 1896
7501267e
FB
1897void cpu_abort(CPUState *env, const char *fmt, ...)
1898{
1899 va_list ap;
493ae1f0 1900 va_list ap2;
7501267e
FB
1901
1902 va_start(ap, fmt);
493ae1f0 1903 va_copy(ap2, ap);
7501267e
FB
1904 fprintf(stderr, "qemu: fatal: ");
1905 vfprintf(stderr, fmt, ap);
1906 fprintf(stderr, "\n");
1907#ifdef TARGET_I386
7fe48483
FB
1908 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1909#else
1910 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1911#endif
93fcfe39
AL
1912 if (qemu_log_enabled()) {
1913 qemu_log("qemu: fatal: ");
1914 qemu_log_vprintf(fmt, ap2);
1915 qemu_log("\n");
f9373291 1916#ifdef TARGET_I386
93fcfe39 1917 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1918#else
93fcfe39 1919 log_cpu_state(env, 0);
f9373291 1920#endif
31b1a7b4 1921 qemu_log_flush();
93fcfe39 1922 qemu_log_close();
924edcae 1923 }
493ae1f0 1924 va_end(ap2);
f9373291 1925 va_end(ap);
fd052bf6
RV
1926#if defined(CONFIG_USER_ONLY)
1927 {
1928 struct sigaction act;
1929 sigfillset(&act.sa_mask);
1930 act.sa_handler = SIG_DFL;
1931 sigaction(SIGABRT, &act, NULL);
1932 }
1933#endif
7501267e
FB
1934 abort();
1935}
1936
c5be9f08
TS
1937CPUState *cpu_copy(CPUState *env)
1938{
01ba9816 1939 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1940 CPUState *next_cpu = new_env->next_cpu;
1941 int cpu_index = new_env->cpu_index;
5a38f081
AL
1942#if defined(TARGET_HAS_ICE)
1943 CPUBreakpoint *bp;
1944 CPUWatchpoint *wp;
1945#endif
1946
c5be9f08 1947 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1948
1949 /* Preserve chaining and index. */
c5be9f08
TS
1950 new_env->next_cpu = next_cpu;
1951 new_env->cpu_index = cpu_index;
5a38f081
AL
1952
1953 /* Clone all break/watchpoints.
1954 Note: Once we support ptrace with hw-debug register access, make sure
1955 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1956 QTAILQ_INIT(&env->breakpoints);
1957 QTAILQ_INIT(&env->watchpoints);
5a38f081 1958#if defined(TARGET_HAS_ICE)
72cf2d4f 1959 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1960 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1961 }
72cf2d4f 1962 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1963 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1964 wp->flags, NULL);
1965 }
1966#endif
1967
c5be9f08
TS
1968 return new_env;
1969}
1970
0124311e
FB
1971#if !defined(CONFIG_USER_ONLY)
1972
5c751e99
EI
1973static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1974{
1975 unsigned int i;
1976
1977 /* Discard jump cache entries for any tb which might potentially
1978 overlap the flushed page. */
1979 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1980 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1981 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1982
1983 i = tb_jmp_cache_hash_page(addr);
1984 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1985 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1986}
1987
08738984
IK
1988static CPUTLBEntry s_cputlb_empty_entry = {
1989 .addr_read = -1,
1990 .addr_write = -1,
1991 .addr_code = -1,
1992 .addend = -1,
1993};
1994
ee8b7021
FB
1995/* NOTE: if flush_global is true, also flush global entries (not
1996 implemented yet) */
1997void tlb_flush(CPUState *env, int flush_global)
33417e70 1998{
33417e70 1999 int i;
0124311e 2000
9fa3e853
FB
2001#if defined(DEBUG_TLB)
2002 printf("tlb_flush:\n");
2003#endif
0124311e
FB
2004 /* must reset current TB so that interrupts cannot modify the
2005 links while we are modifying them */
2006 env->current_tb = NULL;
2007
33417e70 2008 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
2009 int mmu_idx;
2010 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 2011 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 2012 }
33417e70 2013 }
9fa3e853 2014
8a40a180 2015 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 2016
d4c430a8
PB
2017 env->tlb_flush_addr = -1;
2018 env->tlb_flush_mask = 0;
e3db7226 2019 tlb_flush_count++;
33417e70
FB
2020}
2021
274da6b2 2022static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2023{
5fafdf24 2024 if (addr == (tlb_entry->addr_read &
84b7b8e7 2025 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2026 addr == (tlb_entry->addr_write &
84b7b8e7 2027 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2028 addr == (tlb_entry->addr_code &
84b7b8e7 2029 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2030 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2031 }
61382a50
FB
2032}
2033
2e12669a 2034void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2035{
8a40a180 2036 int i;
cfde4bd9 2037 int mmu_idx;
0124311e 2038
9fa3e853 2039#if defined(DEBUG_TLB)
108c49b8 2040 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2041#endif
d4c430a8
PB
2042 /* Check if we need to flush due to large pages. */
2043 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2044#if defined(DEBUG_TLB)
2045 printf("tlb_flush_page: forced full flush ("
2046 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2047 env->tlb_flush_addr, env->tlb_flush_mask);
2048#endif
2049 tlb_flush(env, 1);
2050 return;
2051 }
0124311e
FB
2052 /* must reset current TB so that interrupts cannot modify the
2053 links while we are modifying them */
2054 env->current_tb = NULL;
61382a50
FB
2055
2056 addr &= TARGET_PAGE_MASK;
2057 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2058 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2059 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2060
5c751e99 2061 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2062}
2063
9fa3e853
FB
2064/* update the TLBs so that writes to code in the virtual page 'addr'
2065 can be detected */
c227f099 2066static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2067{
5fafdf24 2068 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2069 ram_addr + TARGET_PAGE_SIZE,
2070 CODE_DIRTY_FLAG);
9fa3e853
FB
2071}
2072
9fa3e853 2073/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2074 tested for self modifying code */
c227f099 2075static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2076 target_ulong vaddr)
9fa3e853 2077{
f7c11b53 2078 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2079}
2080
5fafdf24 2081static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2082 unsigned long start, unsigned long length)
2083{
2084 unsigned long addr;
84b7b8e7
FB
2085 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2086 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2087 if ((addr - start) < length) {
0f459d16 2088 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2089 }
2090 }
2091}
2092
5579c7f3 2093/* Note: start and end must be within the same ram block. */
c227f099 2094void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2095 int dirty_flags)
1ccde1cb
FB
2096{
2097 CPUState *env;
4f2ac237 2098 unsigned long length, start1;
f7c11b53 2099 int i;
1ccde1cb
FB
2100
2101 start &= TARGET_PAGE_MASK;
2102 end = TARGET_PAGE_ALIGN(end);
2103
2104 length = end - start;
2105 if (length == 0)
2106 return;
f7c11b53 2107 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2108
1ccde1cb
FB
2109 /* we modify the TLB cache so that the dirty bit will be set again
2110 when accessing the range */
b2e0a138 2111 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2112 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2113 address comparisons below. */
b2e0a138 2114 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2115 != (end - 1) - start) {
2116 abort();
2117 }
2118
6a00d601 2119 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2120 int mmu_idx;
2121 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2122 for(i = 0; i < CPU_TLB_SIZE; i++)
2123 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2124 start1, length);
2125 }
6a00d601 2126 }
1ccde1cb
FB
2127}
2128
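/* Illustrative sketch (not part of the original file): the usual consumer of
 * the dirty tracking above, e.g. display emulation scanning its video RAM.
 * After cpu_physical_memory_reset_dirty(), writes to the range go back
 * through the notdirty slow path, so the next scan sees fresh dirty bits.
 * 'vram_base' and 'vram_size' are hypothetical, page-aligned values owned
 * by the caller. */
static void example_scan_vram(ram_addr_t vram_base, ram_addr_t vram_size)
{
    ram_addr_t addr;

    for (addr = vram_base; addr < vram_base + vram_size;
         addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(vram_base, vram_base + vram_size,
                                    VGA_DIRTY_FLAG);
}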
74576198
AL
2129int cpu_physical_memory_set_dirty_tracking(int enable)
2130{
f6f3fbca 2131 int ret = 0;
74576198 2132 in_migration = enable;
f6f3fbca
MT
2133 ret = cpu_notify_migration_log(!!enable);
2134 return ret;
74576198
AL
2135}
2136
2137int cpu_physical_memory_get_dirty_tracking(void)
2138{
2139 return in_migration;
2140}
2141
c227f099
AL
2142int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2143 target_phys_addr_t end_addr)
2bec46dc 2144{
7b8f3b78 2145 int ret;
151f7749 2146
f6f3fbca 2147 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2148 return ret;
2bec46dc
AL
2149}
2150
e5896b12
AP
2151int cpu_physical_log_start(target_phys_addr_t start_addr,
2152 ram_addr_t size)
2153{
2154 CPUPhysMemoryClient *client;
2155 QLIST_FOREACH(client, &memory_client_list, list) {
2156 if (client->log_start) {
2157 int r = client->log_start(client, start_addr, size);
2158 if (r < 0) {
2159 return r;
2160 }
2161 }
2162 }
2163 return 0;
2164}
2165
2166int cpu_physical_log_stop(target_phys_addr_t start_addr,
2167 ram_addr_t size)
2168{
2169 CPUPhysMemoryClient *client;
2170 QLIST_FOREACH(client, &memory_client_list, list) {
2171 if (client->log_stop) {
2172 int r = client->log_stop(client, start_addr, size);
2173 if (r < 0) {
2174 return r;
2175 }
2176 }
2177 }
2178 return 0;
2179}
2180
3a7d929e
FB
2181static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2182{
c227f099 2183 ram_addr_t ram_addr;
5579c7f3 2184 void *p;
3a7d929e 2185
84b7b8e7 2186 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2187 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2188 + tlb_entry->addend);
e890261f 2189 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2190 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2191 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2192 }
2193 }
2194}
2195
2196/* update the TLB according to the current state of the dirty bits */
2197void cpu_tlb_update_dirty(CPUState *env)
2198{
2199 int i;
cfde4bd9
IY
2200 int mmu_idx;
2201 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2202 for(i = 0; i < CPU_TLB_SIZE; i++)
2203 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2204 }
3a7d929e
FB
2205}
2206
0f459d16 2207static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2208{
0f459d16
PB
2209 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2210 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2211}
2212
0f459d16
PB
2213/* update the TLB corresponding to virtual page vaddr
2214 so that it is no longer dirty */
2215static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2216{
1ccde1cb 2217 int i;
cfde4bd9 2218 int mmu_idx;
1ccde1cb 2219
0f459d16 2220 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2221 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2222 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2223 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2224}
2225
d4c430a8
PB
2226/* Our TLB does not support large pages, so remember the area covered by
2227 large pages and trigger a full TLB flush if these are invalidated. */
2228static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2229 target_ulong size)
2230{
2231 target_ulong mask = ~(size - 1);
2232
2233 if (env->tlb_flush_addr == (target_ulong)-1) {
2234 env->tlb_flush_addr = vaddr & mask;
2235 env->tlb_flush_mask = mask;
2236 return;
2237 }
2238 /* Extend the existing region to include the new page.
2239 This is a compromise between unnecessary flushes and the cost
2240 of maintaining a full variable size TLB. */
2241 mask &= env->tlb_flush_mask;
2242 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2243 mask <<= 1;
2244 }
2245 env->tlb_flush_addr &= mask;
2246 env->tlb_flush_mask = mask;
2247}
2248
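/* Worked example (not part of the original file), assuming 4KB target pages:
 * the first 2MB large page at vaddr 0x00200000 records
 *     tlb_flush_addr = 0x00200000, tlb_flush_mask = 0xffe00000.
 * A second 2MB page at 0x00600000 then widens the mask until both addresses
 * share one region:
 *     (0x00200000 ^ 0x00600000) & mask != 0  ->  mask <<= 1  (twice)
 *     tlb_flush_addr = 0x00000000, tlb_flush_mask = 0xff800000,
 * i.e. a single 8MB region; invalidating any page inside it now triggers a
 * full tlb_flush() from tlb_flush_page(). */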
2249/* Add a new TLB entry. At most one entry for a given virtual address
2250 is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2251 supplied size is only used by tlb_flush_page. */
2252void tlb_set_page(CPUState *env, target_ulong vaddr,
2253 target_phys_addr_t paddr, int prot,
2254 int mmu_idx, target_ulong size)
9fa3e853 2255{
92e873b9 2256 PhysPageDesc *p;
4f2ac237 2257 unsigned long pd;
9fa3e853 2258 unsigned int index;
4f2ac237 2259 target_ulong address;
0f459d16 2260 target_ulong code_address;
355b1943 2261 unsigned long addend;
84b7b8e7 2262 CPUTLBEntry *te;
a1d1bb31 2263 CPUWatchpoint *wp;
c227f099 2264 target_phys_addr_t iotlb;
9fa3e853 2265
d4c430a8
PB
2266 assert(size >= TARGET_PAGE_SIZE);
2267 if (size != TARGET_PAGE_SIZE) {
2268 tlb_add_large_page(env, vaddr, size);
2269 }
92e873b9 2270 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2271 if (!p) {
2272 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2273 } else {
2274 pd = p->phys_offset;
9fa3e853
FB
2275 }
2276#if defined(DEBUG_TLB)
7fd3f494
SW
2277 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2278 " prot=%x idx=%d pd=0x%08lx\n",
2279 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2280#endif
2281
0f459d16
PB
2282 address = vaddr;
2283 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2284 /* IO memory case (romd handled later) */
2285 address |= TLB_MMIO;
2286 }
5579c7f3 2287 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2288 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2289 /* Normal RAM. */
2290 iotlb = pd & TARGET_PAGE_MASK;
2291 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2292 iotlb |= IO_MEM_NOTDIRTY;
2293 else
2294 iotlb |= IO_MEM_ROM;
2295 } else {
ccbb4d44 2296 /* IO handlers are currently passed a physical address.
0f459d16
PB
2297 It would be nice to pass an offset from the base address
2298 of that region. This would avoid having to special case RAM,
2299 and avoid full address decoding in every device.
2300 We can't use the high bits of pd for this because
2301 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2302 iotlb = (pd & ~TARGET_PAGE_MASK);
2303 if (p) {
8da3ff18
PB
2304 iotlb += p->region_offset;
2305 } else {
2306 iotlb += paddr;
2307 }
0f459d16
PB
2308 }
2309
2310 code_address = address;
2311 /* Make accesses to pages with watchpoints go via the
2312 watchpoint trap routines. */
72cf2d4f 2313 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2314 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2315 /* Avoid trapping reads of pages with a write breakpoint. */
2316 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2317 iotlb = io_mem_watch + paddr;
2318 address |= TLB_MMIO;
2319 break;
2320 }
6658ffb8 2321 }
0f459d16 2322 }
d79acba4 2323
0f459d16
PB
2324 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2325 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2326 te = &env->tlb_table[mmu_idx][index];
2327 te->addend = addend - vaddr;
2328 if (prot & PAGE_READ) {
2329 te->addr_read = address;
2330 } else {
2331 te->addr_read = -1;
2332 }
5c751e99 2333
0f459d16
PB
2334 if (prot & PAGE_EXEC) {
2335 te->addr_code = code_address;
2336 } else {
2337 te->addr_code = -1;
2338 }
2339 if (prot & PAGE_WRITE) {
2340 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2341 (pd & IO_MEM_ROMD)) {
2342 /* Write access calls the I/O callback. */
2343 te->addr_write = address | TLB_MMIO;
2344 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2345 !cpu_physical_memory_is_dirty(pd)) {
2346 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2347 } else {
0f459d16 2348 te->addr_write = address;
9fa3e853 2349 }
0f459d16
PB
2350 } else {
2351 te->addr_write = -1;
9fa3e853 2352 }
9fa3e853
FB
2353}
2354
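/* Illustrative sketch (not part of the original file): the shape of a
 * target's MMU fault handler, the usual caller of tlb_set_page().  A trivial
 * "no MMU" target is assumed here, which identity-maps the faulting page
 * with full permissions; real targets compute paddr and prot from their
 * page tables in their cpu_*_handle_mmu_fault() functions. */
static int example_nommu_handle_mmu_fault(CPUState *env, target_ulong addr,
                                          int mmu_idx)
{
    target_phys_addr_t paddr = addr & TARGET_PAGE_MASK;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    tlb_set_page(env, addr & TARGET_PAGE_MASK, paddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);
    return 0;                   /* no guest exception to raise */
}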
0124311e
FB
2355#else
2356
ee8b7021 2357void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2358{
2359}
2360
2e12669a 2361void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2362{
2363}
2364
edf8e2af
MW
2365/*
2366 * Walks guest process memory "regions" one by one
2367 * and calls callback function 'fn' for each region.
2368 */
5cd2c5b6
RH
2369
2370struct walk_memory_regions_data
2371{
2372 walk_memory_regions_fn fn;
2373 void *priv;
2374 unsigned long start;
2375 int prot;
2376};
2377
2378static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2379 abi_ulong end, int new_prot)
5cd2c5b6
RH
2380{
2381 if (data->start != -1ul) {
2382 int rc = data->fn(data->priv, data->start, end, data->prot);
2383 if (rc != 0) {
2384 return rc;
2385 }
2386 }
2387
2388 data->start = (new_prot ? end : -1ul);
2389 data->prot = new_prot;
2390
2391 return 0;
2392}
2393
2394static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2395 abi_ulong base, int level, void **lp)
5cd2c5b6 2396{
b480d9b7 2397 abi_ulong pa;
5cd2c5b6
RH
2398 int i, rc;
2399
2400 if (*lp == NULL) {
2401 return walk_memory_regions_end(data, base, 0);
2402 }
2403
2404 if (level == 0) {
2405 PageDesc *pd = *lp;
7296abac 2406 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2407 int prot = pd[i].flags;
2408
2409 pa = base | (i << TARGET_PAGE_BITS);
2410 if (prot != data->prot) {
2411 rc = walk_memory_regions_end(data, pa, prot);
2412 if (rc != 0) {
2413 return rc;
9fa3e853 2414 }
9fa3e853 2415 }
5cd2c5b6
RH
2416 }
2417 } else {
2418 void **pp = *lp;
7296abac 2419 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2420 pa = base | ((abi_ulong)i <<
2421 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2422 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2423 if (rc != 0) {
2424 return rc;
2425 }
2426 }
2427 }
2428
2429 return 0;
2430}
2431
2432int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2433{
2434 struct walk_memory_regions_data data;
2435 unsigned long i;
2436
2437 data.fn = fn;
2438 data.priv = priv;
2439 data.start = -1ul;
2440 data.prot = 0;
2441
2442 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2443 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2444 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2445 if (rc != 0) {
2446 return rc;
9fa3e853 2447 }
33417e70 2448 }
5cd2c5b6
RH
2449
2450 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2451}
2452
b480d9b7
PB
2453static int dump_region(void *priv, abi_ulong start,
2454 abi_ulong end, unsigned long prot)
edf8e2af
MW
2455{
2456 FILE *f = (FILE *)priv;
2457
b480d9b7
PB
2458 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2459 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2460 start, end, end - start,
2461 ((prot & PAGE_READ) ? 'r' : '-'),
2462 ((prot & PAGE_WRITE) ? 'w' : '-'),
2463 ((prot & PAGE_EXEC) ? 'x' : '-'));
2464
2465 return (0);
2466}
2467
2468/* dump memory mappings */
2469void page_dump(FILE *f)
2470{
2471 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2472 "start", "end", "size", "prot");
2473 walk_memory_regions(f, dump_region);
33417e70
FB
2474}
2475
53a5960a 2476int page_get_flags(target_ulong address)
33417e70 2477{
9fa3e853
FB
2478 PageDesc *p;
2479
2480 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2481 if (!p)
9fa3e853
FB
2482 return 0;
2483 return p->flags;
2484}
2485
376a7909
RH
2486/* Modify the flags of a page and invalidate the code if necessary.
2487 The flag PAGE_WRITE_ORG is positioned automatically depending
2488 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2489void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2490{
376a7909
RH
2491 target_ulong addr, len;
2492
2493 /* This function should never be called with addresses outside the
2494 guest address space. If this assert fires, it probably indicates
2495 a missing call to h2g_valid. */
b480d9b7
PB
2496#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2497 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2498#endif
2499 assert(start < end);
9fa3e853
FB
2500
2501 start = start & TARGET_PAGE_MASK;
2502 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2503
2504 if (flags & PAGE_WRITE) {
9fa3e853 2505 flags |= PAGE_WRITE_ORG;
376a7909
RH
2506 }
2507
2508 for (addr = start, len = end - start;
2509 len != 0;
2510 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2511 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2512
2513 /* If the write protection bit is set, then we invalidate
2514 the code inside. */
5fafdf24 2515 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2516 (flags & PAGE_WRITE) &&
2517 p->first_tb) {
d720b93d 2518 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2519 }
2520 p->flags = flags;
2521 }
33417e70
FB
2522}
2523
3d97b40b
TS
2524int page_check_range(target_ulong start, target_ulong len, int flags)
2525{
2526 PageDesc *p;
2527 target_ulong end;
2528 target_ulong addr;
2529
376a7909
RH
2530 /* This function should never be called with addresses outside the
2531 guest address space. If this assert fires, it probably indicates
2532 a missing call to h2g_valid. */
338e9e6c
BS
2533#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2534 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2535#endif
2536
3e0650a9
RH
2537 if (len == 0) {
2538 return 0;
2539 }
376a7909
RH
2540 if (start + len - 1 < start) {
2541 /* We've wrapped around. */
55f280c9 2542 return -1;
376a7909 2543 }
55f280c9 2544
3d97b40b
TS
2545 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2546 start = start & TARGET_PAGE_MASK;
2547
376a7909
RH
2548 for (addr = start, len = end - start;
2549 len != 0;
2550 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2551 p = page_find(addr >> TARGET_PAGE_BITS);
2552 if( !p )
2553 return -1;
2554 if( !(p->flags & PAGE_VALID) )
2555 return -1;
2556
dae3270c 2557 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2558 return -1;
dae3270c
FB
2559 if (flags & PAGE_WRITE) {
2560 if (!(p->flags & PAGE_WRITE_ORG))
2561 return -1;
2562 /* unprotect the page if it was put read-only because it
2563 contains translated code */
2564 if (!(p->flags & PAGE_WRITE)) {
2565 if (!page_unprotect(addr, 0, NULL))
2566 return -1;
2567 }
2568 return 0;
2569 }
3d97b40b
TS
2570 }
2571 return 0;
2572}
2573
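/* Illustrative sketch (not part of the original file): user-mode syscall
 * emulation validates guest buffers with page_check_range() before touching
 * them (linux-user wraps this in its access_ok() helper); a failure maps
 * naturally onto -EFAULT. */
static int example_validate_guest_buffer(abi_ulong guest_addr, abi_ulong len,
                                         int writable)
{
    int flags = writable ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    if (page_check_range(guest_addr, len, flags) < 0) {
        return -EFAULT;
    }
    return 0;
}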
9fa3e853 2574/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2575 page. Return TRUE if the fault was successfully handled. */
53a5960a 2576int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2577{
45d679d6
AJ
2578 unsigned int prot;
2579 PageDesc *p;
53a5960a 2580 target_ulong host_start, host_end, addr;
9fa3e853 2581
c8a706fe
PB
2582 /* Technically this isn't safe inside a signal handler. However we
2583 know this only ever happens in a synchronous SEGV handler, so in
2584 practice it seems to be ok. */
2585 mmap_lock();
2586
45d679d6
AJ
2587 p = page_find(address >> TARGET_PAGE_BITS);
2588 if (!p) {
c8a706fe 2589 mmap_unlock();
9fa3e853 2590 return 0;
c8a706fe 2591 }
45d679d6 2592
9fa3e853
FB
2593 /* if the page was really writable, then we change its
2594 protection back to writable */
45d679d6
AJ
2595 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2596 host_start = address & qemu_host_page_mask;
2597 host_end = host_start + qemu_host_page_size;
2598
2599 prot = 0;
2600 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2601 p = page_find(addr >> TARGET_PAGE_BITS);
2602 p->flags |= PAGE_WRITE;
2603 prot |= p->flags;
2604
9fa3e853
FB
2605 /* and since the content will be modified, we must invalidate
2606 the corresponding translated code. */
45d679d6 2607 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2608#ifdef DEBUG_TB_CHECK
45d679d6 2609 tb_invalidate_check(addr);
9fa3e853 2610#endif
9fa3e853 2611 }
45d679d6
AJ
2612 mprotect((void *)g2h(host_start), qemu_host_page_size,
2613 prot & PAGE_BITS);
2614
2615 mmap_unlock();
2616 return 1;
9fa3e853 2617 }
c8a706fe 2618 mmap_unlock();
9fa3e853
FB
2619 return 0;
2620}
2621
6a00d601
FB
2622static inline void tlb_set_dirty(CPUState *env,
2623 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2624{
2625}
9fa3e853
FB
2626#endif /* defined(CONFIG_USER_ONLY) */
2627
e2eef170 2628#if !defined(CONFIG_USER_ONLY)
8da3ff18 2629
c04b2b78
PB
2630#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2631typedef struct subpage_t {
2632 target_phys_addr_t base;
f6405247
RH
2633 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2634 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2635} subpage_t;
2636
c227f099
AL
2637static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2638 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2639static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2640 ram_addr_t orig_memory,
2641 ram_addr_t region_offset);
db7b5426
BS
2642#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2643 need_subpage) \
2644 do { \
2645 if (addr > start_addr) \
2646 start_addr2 = 0; \
2647 else { \
2648 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2649 if (start_addr2 > 0) \
2650 need_subpage = 1; \
2651 } \
2652 \
49e9fba2 2653 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2654 end_addr2 = TARGET_PAGE_SIZE - 1; \
2655 else { \
2656 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2657 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2658 need_subpage = 1; \
2659 } \
2660 } while (0)
2661
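/* Worked example (not part of the original file), assuming 4KB target pages:
 * registering orig_size = 0x1800 bytes at start_addr = 0x10000c00.
 *   - Page addr = 0x10000000: addr <= start_addr, so start_addr2 = 0xc00
 *     (> 0 -> need_subpage); the region extends past this page, so
 *     end_addr2 = 0xfff.
 *   - Page addr = 0x10001000: fully covered, start_addr2 = 0 and
 *     end_addr2 = 0xfff, so no subpage is needed.
 *   - Page addr = 0x10002000: start_addr2 = 0, but
 *     end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK = 0x3ff
 *     (< 0xfff -> need_subpage).
 * Only the two partially covered edge pages take the subpage path below. */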
8f2498f9
MT
2662/* register physical memory.
2663 For RAM, 'size' must be a multiple of the target page size.
2664 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2665 io memory page. The address used when calling the IO function is
2666 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2667 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2668 before calculating this offset. This should not be a problem unless
2669 the low bits of start_addr and region_offset differ. */
0fd542fb 2670void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2671 ram_addr_t size,
2672 ram_addr_t phys_offset,
0fd542fb
MT
2673 ram_addr_t region_offset,
2674 bool log_dirty)
33417e70 2675{
c227f099 2676 target_phys_addr_t addr, end_addr;
92e873b9 2677 PhysPageDesc *p;
9d42037b 2678 CPUState *env;
c227f099 2679 ram_addr_t orig_size = size;
f6405247 2680 subpage_t *subpage;
33417e70 2681
3b8e6a2d 2682 assert(size);
0fd542fb 2683 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2684
67c4d23c
PB
2685 if (phys_offset == IO_MEM_UNASSIGNED) {
2686 region_offset = start_addr;
2687 }
8da3ff18 2688 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2689 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2690 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2691
2692 addr = start_addr;
2693 do {
db7b5426
BS
2694 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2695 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2696 ram_addr_t orig_memory = p->phys_offset;
2697 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2698 int need_subpage = 0;
2699
2700 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2701 need_subpage);
f6405247 2702 if (need_subpage) {
db7b5426
BS
2703 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2704 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2705 &p->phys_offset, orig_memory,
2706 p->region_offset);
db7b5426
BS
2707 } else {
2708 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2709 >> IO_MEM_SHIFT];
2710 }
8da3ff18
PB
2711 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2712 region_offset);
2713 p->region_offset = 0;
db7b5426
BS
2714 } else {
2715 p->phys_offset = phys_offset;
2716 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2717 (phys_offset & IO_MEM_ROMD))
2718 phys_offset += TARGET_PAGE_SIZE;
2719 }
2720 } else {
2721 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2722 p->phys_offset = phys_offset;
8da3ff18 2723 p->region_offset = region_offset;
db7b5426 2724 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2725 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2726 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2727 } else {
c227f099 2728 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2729 int need_subpage = 0;
2730
2731 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2732 end_addr2, need_subpage);
2733
f6405247 2734 if (need_subpage) {
db7b5426 2735 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2736 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2737 addr & TARGET_PAGE_MASK);
db7b5426 2738 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2739 phys_offset, region_offset);
2740 p->region_offset = 0;
db7b5426
BS
2741 }
2742 }
2743 }
8da3ff18 2744 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2745 addr += TARGET_PAGE_SIZE;
2746 } while (addr != end_addr);
3b46e624 2747
9d42037b
FB
2748 /* since each CPU stores ram addresses in its TLB cache, we must
2749 reset the modified entries */
2750 /* XXX: slow ! */
2751 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2752 tlb_flush(env, 1);
2753 }
33417e70
FB
2754}
2755
ba863458 2756/* XXX: temporary until new memory mapping API */
c227f099 2757ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2758{
2759 PhysPageDesc *p;
2760
2761 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2762 if (!p)
2763 return IO_MEM_UNASSIGNED;
2764 return p->phys_offset;
2765}
2766
c227f099 2767void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2768{
2769 if (kvm_enabled())
2770 kvm_coalesce_mmio_region(addr, size);
2771}
2772
c227f099 2773void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2774{
2775 if (kvm_enabled())
2776 kvm_uncoalesce_mmio_region(addr, size);
2777}
2778
62a2744c
SY
2779void qemu_flush_coalesced_mmio_buffer(void)
2780{
2781 if (kvm_enabled())
2782 kvm_flush_coalesced_mmio_buffer();
2783}
2784
c902760f
MT
2785#if defined(__linux__) && !defined(TARGET_S390X)
2786
2787#include <sys/vfs.h>
2788
2789#define HUGETLBFS_MAGIC 0x958458f6
2790
2791static long gethugepagesize(const char *path)
2792{
2793 struct statfs fs;
2794 int ret;
2795
2796 do {
9742bf26 2797 ret = statfs(path, &fs);
c902760f
MT
2798 } while (ret != 0 && errno == EINTR);
2799
2800 if (ret != 0) {
9742bf26
YT
2801 perror(path);
2802 return 0;
c902760f
MT
2803 }
2804
2805 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2806 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2807
2808 return fs.f_bsize;
2809}
2810
04b16653
AW
2811static void *file_ram_alloc(RAMBlock *block,
2812 ram_addr_t memory,
2813 const char *path)
c902760f
MT
2814{
2815 char *filename;
2816 void *area;
2817 int fd;
2818#ifdef MAP_POPULATE
2819 int flags;
2820#endif
2821 unsigned long hpagesize;
2822
2823 hpagesize = gethugepagesize(path);
2824 if (!hpagesize) {
9742bf26 2825 return NULL;
c902760f
MT
2826 }
2827
2828 if (memory < hpagesize) {
2829 return NULL;
2830 }
2831
2832 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2833 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2834 return NULL;
2835 }
2836
2837 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2838 return NULL;
c902760f
MT
2839 }
2840
2841 fd = mkstemp(filename);
2842 if (fd < 0) {
9742bf26
YT
2843 perror("unable to create backing store for hugepages");
2844 free(filename);
2845 return NULL;
c902760f
MT
2846 }
2847 unlink(filename);
2848 free(filename);
2849
2850 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2851
2852 /*
2853 * ftruncate is not supported by hugetlbfs in older
2854 * hosts, so don't bother bailing out on errors.
2855 * If anything goes wrong with it under other filesystems,
2856 * mmap will fail.
2857 */
2858 if (ftruncate(fd, memory))
9742bf26 2859 perror("ftruncate");
c902760f
MT
2860
2861#ifdef MAP_POPULATE
2862 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2863 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2864 * to sidestep this quirk.
2865 */
2866 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2867 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2868#else
2869 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2870#endif
2871 if (area == MAP_FAILED) {
9742bf26
YT
2872 perror("file_ram_alloc: can't mmap RAM pages");
2873 close(fd);
2874 return (NULL);
c902760f 2875 }
04b16653 2876 block->fd = fd;
c902760f
MT
2877 return area;
2878}
2879#endif
2880
d17b5288 2881static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2882{
2883 RAMBlock *block, *next_block;
f15fbc4b 2884 ram_addr_t offset = 0, mingap = RAM_ADDR_MAX;
04b16653
AW
2885
2886 if (QLIST_EMPTY(&ram_list.blocks))
2887 return 0;
2888
2889 QLIST_FOREACH(block, &ram_list.blocks, next) {
f15fbc4b 2890 ram_addr_t end, next = RAM_ADDR_MAX;
04b16653
AW
2891
2892 end = block->offset + block->length;
2893
2894 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2895 if (next_block->offset >= end) {
2896 next = MIN(next, next_block->offset);
2897 }
2898 }
2899 if (next - end >= size && next - end < mingap) {
2900 offset = end;
2901 mingap = next - end;
2902 }
2903 }
2904 return offset;
2905}
2906
2907static ram_addr_t last_ram_offset(void)
d17b5288
AW
2908{
2909 RAMBlock *block;
2910 ram_addr_t last = 0;
2911
2912 QLIST_FOREACH(block, &ram_list.blocks, next)
2913 last = MAX(last, block->offset + block->length);
2914
2915 return last;
2916}
2917
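/* Worked example (not part of the original file): with two registered blocks
 *     block A: offset 0x00000000, length 0x08000000  (128MB)
 *     block B: offset 0x10000000, length 0x02000000  (32MB)
 * find_ram_offset(0x04000000) looks at the gap after each block:
 *     after A: next block starts at 0x10000000 -> gap of 0x08000000
 *     after B: no following block               -> gap up to RAM_ADDR_MAX
 * Both gaps fit the 64MB request; the 128MB gap after A is the smaller one,
 * so the new block is placed at offset 0x08000000.  Picking the smallest
 * sufficient gap keeps the ram_addr_t space compact. */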
84b89d78 2918ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2919 ram_addr_t size, void *host)
84b89d78
CM
2920{
2921 RAMBlock *new_block, *block;
2922
2923 size = TARGET_PAGE_ALIGN(size);
2924 new_block = qemu_mallocz(sizeof(*new_block));
2925
2926 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2927 char *id = dev->parent_bus->info->get_dev_path(dev);
2928 if (id) {
2929 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2930 qemu_free(id);
2931 }
2932 }
2933 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2934
2935 QLIST_FOREACH(block, &ram_list.blocks, next) {
2936 if (!strcmp(block->idstr, new_block->idstr)) {
2937 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2938 new_block->idstr);
2939 abort();
2940 }
2941 }
2942
432d268c 2943 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2944 if (host) {
2945 new_block->host = host;
cd19cfa2 2946 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2947 } else {
2948 if (mem_path) {
c902760f 2949#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2950 new_block->host = file_ram_alloc(new_block, size, mem_path);
2951 if (!new_block->host) {
2952 new_block->host = qemu_vmalloc(size);
e78815a5 2953 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2954 }
c902760f 2955#else
6977dfe6
YT
2956 fprintf(stderr, "-mem-path option unsupported\n");
2957 exit(1);
c902760f 2958#endif
6977dfe6 2959 } else {
6b02494d 2960#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2961 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2962 a system-defined value, which is at least 256GB. Larger systems
2963 have larger values. We put the guest between the end of data
2964 segment (system break) and this value. We use 32GB as a base to
2965 have enough room for the system break to grow. */
2966 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2967 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2968 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2969 if (new_block->host == MAP_FAILED) {
2970 fprintf(stderr, "Allocating RAM failed\n");
2971 abort();
2972 }
6b02494d 2973#else
868bb33f 2974 if (xen_enabled()) {
432d268c
JN
2975 xen_ram_alloc(new_block->offset, size);
2976 } else {
2977 new_block->host = qemu_vmalloc(size);
2978 }
6b02494d 2979#endif
e78815a5 2980 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2981 }
c902760f 2982 }
94a6b54f
PB
2983 new_block->length = size;
2984
f471a17e 2985 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2986
f471a17e 2987 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2988 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2989 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2990 0xff, size >> TARGET_PAGE_BITS);
2991
6f0437e8
JK
2992 if (kvm_enabled())
2993 kvm_setup_guest_memory(new_block->host, size);
2994
94a6b54f
PB
2995 return new_block->offset;
2996}
e9a1ab19 2997
6977dfe6
YT
2998ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2999{
3000 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
3001}
3002
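/* Illustrative sketch (not part of the original file): how a device model
 * typically obtains guest RAM plus a host pointer into it.  The name
 * "example.vram" and the 16MB size are arbitrary; dev may be NULL for RAM
 * that is not tied to a qdev device. */
static ram_addr_t example_alloc_vram(DeviceState *dev, uint8_t **host_ptr)
{
    ram_addr_t offset = qemu_ram_alloc(dev, "example.vram", 16 * 1024 * 1024);

    /* Device-local access only; DMA should go through
       cpu_physical_memory_rw()/cpu_physical_memory_map() instead. */
    *host_ptr = qemu_get_ram_ptr(offset);
    return offset;
}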
1f2e98b6
AW
3003void qemu_ram_free_from_ptr(ram_addr_t addr)
3004{
3005 RAMBlock *block;
3006
3007 QLIST_FOREACH(block, &ram_list.blocks, next) {
3008 if (addr == block->offset) {
3009 QLIST_REMOVE(block, next);
3010 qemu_free(block);
3011 return;
3012 }
3013 }
3014}
3015
c227f099 3016void qemu_ram_free(ram_addr_t addr)
e9a1ab19 3017{
04b16653
AW
3018 RAMBlock *block;
3019
3020 QLIST_FOREACH(block, &ram_list.blocks, next) {
3021 if (addr == block->offset) {
3022 QLIST_REMOVE(block, next);
cd19cfa2
HY
3023 if (block->flags & RAM_PREALLOC_MASK) {
3024 ;
3025 } else if (mem_path) {
04b16653
AW
3026#if defined (__linux__) && !defined(TARGET_S390X)
3027 if (block->fd) {
3028 munmap(block->host, block->length);
3029 close(block->fd);
3030 } else {
3031 qemu_vfree(block->host);
3032 }
fd28aa13
JK
3033#else
3034 abort();
04b16653
AW
3035#endif
3036 } else {
3037#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3038 munmap(block->host, block->length);
3039#else
868bb33f 3040 if (xen_enabled()) {
e41d7c69 3041 xen_invalidate_map_cache_entry(block->host);
432d268c
JN
3042 } else {
3043 qemu_vfree(block->host);
3044 }
04b16653
AW
3045#endif
3046 }
3047 qemu_free(block);
3048 return;
3049 }
3050 }
3051
e9a1ab19
FB
3052}
3053
cd19cfa2
HY
3054#ifndef _WIN32
3055void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3056{
3057 RAMBlock *block;
3058 ram_addr_t offset;
3059 int flags;
3060 void *area, *vaddr;
3061
3062 QLIST_FOREACH(block, &ram_list.blocks, next) {
3063 offset = addr - block->offset;
3064 if (offset < block->length) {
3065 vaddr = block->host + offset;
3066 if (block->flags & RAM_PREALLOC_MASK) {
3067 ;
3068 } else {
3069 flags = MAP_FIXED;
3070 munmap(vaddr, length);
3071 if (mem_path) {
3072#if defined(__linux__) && !defined(TARGET_S390X)
3073 if (block->fd) {
3074#ifdef MAP_POPULATE
3075 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3076 MAP_PRIVATE;
3077#else
3078 flags |= MAP_PRIVATE;
3079#endif
3080 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3081 flags, block->fd, offset);
3082 } else {
3083 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3084 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3085 flags, -1, 0);
3086 }
fd28aa13
JK
3087#else
3088 abort();
cd19cfa2
HY
3089#endif
3090 } else {
3091#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3092 flags |= MAP_SHARED | MAP_ANONYMOUS;
3093 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3094 flags, -1, 0);
3095#else
3096 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3097 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3098 flags, -1, 0);
3099#endif
3100 }
3101 if (area != vaddr) {
f15fbc4b
AP
3102 fprintf(stderr, "Could not remap addr: "
3103 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
cd19cfa2
HY
3104 length, addr);
3105 exit(1);
3106 }
3107 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3108 }
3109 return;
3110 }
3111 }
3112}
3113#endif /* !_WIN32 */
3114
dc828ca1 3115/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3116 With the exception of the softmmu code in this file, this should
3117 only be used for local memory (e.g. video ram) that the device owns,
3118 and knows it isn't going to access beyond the end of the block.
3119
3120 It should not be used for general purpose DMA.
3121 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3122 */
c227f099 3123void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3124{
94a6b54f
PB
3125 RAMBlock *block;
3126
f471a17e
AW
3127 QLIST_FOREACH(block, &ram_list.blocks, next) {
3128 if (addr - block->offset < block->length) {
7d82af38
VP
3129 /* Move this entry to the start of the list. */
3130 if (block != QLIST_FIRST(&ram_list.blocks)) {
3131 QLIST_REMOVE(block, next);
3132 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3133 }
868bb33f 3134 if (xen_enabled()) {
432d268c
JN
3135 /* We need to check if the requested address is in the RAM
3136 * because we don't want to map the entire memory in QEMU.
712c2b41 3137 * In that case just map until the end of the page.
432d268c
JN
3138 */
3139 if (block->offset == 0) {
e41d7c69 3140 return xen_map_cache(addr, 0, 0);
432d268c 3141 } else if (block->host == NULL) {
e41d7c69
JK
3142 block->host =
3143 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3144 }
3145 }
f471a17e
AW
3146 return block->host + (addr - block->offset);
3147 }
94a6b54f 3148 }
f471a17e
AW
3149
3150 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3151 abort();
3152
3153 return NULL;
dc828ca1
PB
3154}
3155
b2e0a138
MT
3156/* Return a host pointer to ram allocated with qemu_ram_alloc.
3157 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3158 */
3159void *qemu_safe_ram_ptr(ram_addr_t addr)
3160{
3161 RAMBlock *block;
3162
3163 QLIST_FOREACH(block, &ram_list.blocks, next) {
3164 if (addr - block->offset < block->length) {
868bb33f 3165 if (xen_enabled()) {
432d268c
JN
3166 /* We need to check if the requested address is in the RAM
3167 * because we don't want to map the entire memory in QEMU.
712c2b41 3168 * In that case just map until the end of the page.
432d268c
JN
3169 */
3170 if (block->offset == 0) {
e41d7c69 3171 return xen_map_cache(addr, 0, 0);
432d268c 3172 } else if (block->host == NULL) {
e41d7c69
JK
3173 block->host =
3174 xen_map_cache(block->offset, block->length, 1);
432d268c
JN
3175 }
3176 }
b2e0a138
MT
3177 return block->host + (addr - block->offset);
3178 }
3179 }
3180
3181 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3182 abort();
3183
3184 return NULL;
3185}
3186
38bee5dc
SS
3187/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3188 * but takes a size argument */
8ab934f9 3189void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
38bee5dc 3190{
8ab934f9
SS
3191 if (*size == 0) {
3192 return NULL;
3193 }
868bb33f 3194 if (xen_enabled()) {
e41d7c69 3195 return xen_map_cache(addr, *size, 1);
868bb33f 3196 } else {
38bee5dc
SS
3197 RAMBlock *block;
3198
3199 QLIST_FOREACH(block, &ram_list.blocks, next) {
3200 if (addr - block->offset < block->length) {
3201 if (addr - block->offset + *size > block->length)
3202 *size = block->length - addr + block->offset;
3203 return block->host + (addr - block->offset);
3204 }
3205 }
3206
3207 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3208 abort();
38bee5dc
SS
3209 }
3210}
3211
050a0ddf
AP
3212void qemu_put_ram_ptr(void *addr)
3213{
3214 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3215}
3216
e890261f 3217int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3218{
94a6b54f
PB
3219 RAMBlock *block;
3220 uint8_t *host = ptr;
3221
868bb33f 3222 if (xen_enabled()) {
e41d7c69 3223 *ram_addr = xen_ram_addr_from_mapcache(ptr);
712c2b41
SS
3224 return 0;
3225 }
3226
f471a17e 3227 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
3228 /* This case happens when the block is not mapped. */
3229 if (block->host == NULL) {
3230 continue;
3231 }
f471a17e 3232 if (host - block->host < block->length) {
e890261f
MT
3233 *ram_addr = block->offset + (host - block->host);
3234 return 0;
f471a17e 3235 }
94a6b54f 3236 }
432d268c 3237
e890261f
MT
3238 return -1;
3239}
f471a17e 3240
e890261f
MT
3241/* Some of the softmmu routines need to translate from a host pointer
3242 (typically a TLB entry) back to a ram offset. */
3243ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3244{
3245 ram_addr_t ram_addr;
f471a17e 3246
e890261f
MT
3247 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3248 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3249 abort();
3250 }
3251 return ram_addr;
5579c7f3
PB
3252}
3253
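/* Illustrative sketch (not part of the original file): the two directions of
 * the translation implemented above.  qemu_get_ram_ptr() turns a ram_addr_t
 * into a host pointer inside the backing RAMBlock; qemu_ram_addr_from_host()
 * recovers the ram_addr_t and fails (returns -1) for host pointers that are
 * not backed by guest RAM.  In the normal, non-Xen case the round trip is
 * exact. */
static void example_ram_addr_round_trip(ram_addr_t addr)
{
    void *host = qemu_get_ram_ptr(addr);
    ram_addr_t back;

    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == addr);
    }
}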
c227f099 3254static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3255{
67d3b957 3256#ifdef DEBUG_UNASSIGNED
ab3d1727 3257 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3258#endif
5b450407 3259#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3260 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
e18231a3
BS
3261#endif
3262 return 0;
3263}
3264
c227f099 3265static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3266{
3267#ifdef DEBUG_UNASSIGNED
3268 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3269#endif
5b450407 3270#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3271 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
e18231a3
BS
3272#endif
3273 return 0;
3274}
3275
c227f099 3276static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3277{
3278#ifdef DEBUG_UNASSIGNED
3279 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3280#endif
5b450407 3281#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3282 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
67d3b957 3283#endif
33417e70
FB
3284 return 0;
3285}
3286
c227f099 3287static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3288{
67d3b957 3289#ifdef DEBUG_UNASSIGNED
ab3d1727 3290 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3291#endif
5b450407 3292#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3293 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
e18231a3
BS
3294#endif
3295}
3296
c227f099 3297static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3298{
3299#ifdef DEBUG_UNASSIGNED
3300 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3301#endif
5b450407 3302#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3303 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
e18231a3
BS
3304#endif
3305}
3306
c227f099 3307static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3308{
3309#ifdef DEBUG_UNASSIGNED
3310 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3311#endif
5b450407 3312#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
b14ef7c9 3313 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
b4f0a316 3314#endif
33417e70
FB
3315}
3316
d60efc6b 3317static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3318 unassigned_mem_readb,
e18231a3
BS
3319 unassigned_mem_readw,
3320 unassigned_mem_readl,
33417e70
FB
3321};
3322
d60efc6b 3323static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3324 unassigned_mem_writeb,
e18231a3
BS
3325 unassigned_mem_writew,
3326 unassigned_mem_writel,
33417e70
FB
3327};
3328
c227f099 3329static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3330 uint32_t val)
9fa3e853 3331{
3a7d929e 3332 int dirty_flags;
f7c11b53 3333 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3334 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3335#if !defined(CONFIG_USER_ONLY)
3a7d929e 3336 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3337 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3338#endif
3a7d929e 3339 }
5579c7f3 3340 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3341 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3342 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3343 /* we remove the notdirty callback only if the code has been
3344 flushed */
3345 if (dirty_flags == 0xff)
2e70f6ef 3346 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3347}
3348
c227f099 3349static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3350 uint32_t val)
9fa3e853 3351{
3a7d929e 3352 int dirty_flags;
f7c11b53 3353 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3354 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3355#if !defined(CONFIG_USER_ONLY)
3a7d929e 3356 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3357 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3358#endif
3a7d929e 3359 }
5579c7f3 3360 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3361 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3362 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3363 /* we remove the notdirty callback only if the code has been
3364 flushed */
3365 if (dirty_flags == 0xff)
2e70f6ef 3366 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3367}
3368
c227f099 3369static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3370 uint32_t val)
9fa3e853 3371{
3a7d929e 3372 int dirty_flags;
f7c11b53 3373 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3374 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3375#if !defined(CONFIG_USER_ONLY)
3a7d929e 3376 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3377 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3378#endif
3a7d929e 3379 }
5579c7f3 3380 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3381 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3382 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3383 /* we remove the notdirty callback only if the code has been
3384 flushed */
3385 if (dirty_flags == 0xff)
2e70f6ef 3386 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3387}
3388
d60efc6b 3389static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3390 NULL, /* never used */
3391 NULL, /* never used */
3392 NULL, /* never used */
3393};
3394
d60efc6b 3395static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3396 notdirty_mem_writeb,
3397 notdirty_mem_writew,
3398 notdirty_mem_writel,
3399};
3400
0f459d16 3401/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3402static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3403{
3404 CPUState *env = cpu_single_env;
06d55cc1
AL
3405 target_ulong pc, cs_base;
3406 TranslationBlock *tb;
0f459d16 3407 target_ulong vaddr;
a1d1bb31 3408 CPUWatchpoint *wp;
06d55cc1 3409 int cpu_flags;
0f459d16 3410
06d55cc1
AL
3411 if (env->watchpoint_hit) {
3412 /* We re-entered the check after replacing the TB. Now raise
3413 * the debug interrupt so that it will trigger after the
3414 * current instruction. */
3415 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3416 return;
3417 }
2e70f6ef 3418 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3419 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3420 if ((vaddr == (wp->vaddr & len_mask) ||
3421 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3422 wp->flags |= BP_WATCHPOINT_HIT;
3423 if (!env->watchpoint_hit) {
3424 env->watchpoint_hit = wp;
3425 tb = tb_find_pc(env->mem_io_pc);
3426 if (!tb) {
3427 cpu_abort(env, "check_watchpoint: could not find TB for "
3428 "pc=%p", (void *)env->mem_io_pc);
3429 }
618ba8e6 3430 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3431 tb_phys_invalidate(tb, -1);
3432 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3433 env->exception_index = EXCP_DEBUG;
3434 } else {
3435 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3436 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3437 }
3438 cpu_resume_from_signal(env, NULL);
06d55cc1 3439 }
6e140f28
AL
3440 } else {
3441 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3442 }
3443 }
3444}
3445
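/* Illustrative sketch (not part of the original file): installing a data
 * watchpoint that ends up being serviced by check_watchpoint() above.  Once
 * the watchpoint exists, tlb_set_page() marks the containing page TLB_MMIO
 * and routes its accesses through io_mem_watch, so every load/store on that
 * page is checked against env->watchpoints.  vaddr must be aligned to the
 * (power of two) length; the 4-byte length and the flags are just examples. */
static void example_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE | BP_GDB, NULL);
}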
6658ffb8
PB
3446/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3447 so these check for a hit then pass through to the normal out-of-line
3448 phys routines. */
c227f099 3449static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3450{
b4051334 3451 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3452 return ldub_phys(addr);
3453}
3454
c227f099 3455static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3456{
b4051334 3457 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3458 return lduw_phys(addr);
3459}
3460
c227f099 3461static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3462{
b4051334 3463 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3464 return ldl_phys(addr);
3465}
3466
c227f099 3467static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3468 uint32_t val)
3469{
b4051334 3470 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3471 stb_phys(addr, val);
3472}
3473
c227f099 3474static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3475 uint32_t val)
3476{
b4051334 3477 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3478 stw_phys(addr, val);
3479}
3480
c227f099 3481static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3482 uint32_t val)
3483{
b4051334 3484 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3485 stl_phys(addr, val);
3486}
3487
d60efc6b 3488static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3489 watch_mem_readb,
3490 watch_mem_readw,
3491 watch_mem_readl,
3492};
3493
d60efc6b 3494static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3495 watch_mem_writeb,
3496 watch_mem_writew,
3497 watch_mem_writel,
3498};
6658ffb8 3499
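
/* Usage sketch (illustration only, not part of the upstream code): arming a
 * watchpoint.  cpu_watchpoint_insert() is assumed to be the helper defined
 * earlier in this file, and the demo_* name is invented for this example.
 * Once armed, the TLB entry for the watched page is redirected to
 * io_mem_watch, so every guest access funnels through the watch_mem_*
 * handlers above and into check_watchpoint(). */
static void demo_watch_guest_word(CPUState *env, target_ulong vaddr)
{
    /* vaddr is assumed to be 4-byte aligned, as a length-4 watchpoint
       requires. */
    if (cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL) < 0) {
        fprintf(stderr, "demo: could not insert watchpoint\n");
    }
    /* A guest store to [vaddr, vaddr + 4) now raises EXCP_DEBUG. */
}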
f6405247
RH
3500static inline uint32_t subpage_readlen (subpage_t *mmio,
3501 target_phys_addr_t addr,
3502 unsigned int len)
db7b5426 3503{
f6405247 3504 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3505#if defined(DEBUG_SUBPAGE)
3506 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3507 mmio, len, addr, idx);
3508#endif
db7b5426 3509
f6405247
RH
3510 addr += mmio->region_offset[idx];
3511 idx = mmio->sub_io_index[idx];
3512 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3513}
3514
c227f099 3515static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3516 uint32_t value, unsigned int len)
db7b5426 3517{
f6405247 3518 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3519#if defined(DEBUG_SUBPAGE)
f6405247
RH
3520 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3521 __func__, mmio, len, addr, idx, value);
db7b5426 3522#endif
f6405247
RH
3523
3524 addr += mmio->region_offset[idx];
3525 idx = mmio->sub_io_index[idx];
3526 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3527}
3528
c227f099 3529static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3530{
db7b5426
BS
3531 return subpage_readlen(opaque, addr, 0);
3532}
3533
c227f099 3534static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3535 uint32_t value)
3536{
db7b5426
BS
3537 subpage_writelen(opaque, addr, value, 0);
3538}
3539
c227f099 3540static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3541{
db7b5426
BS
3542 return subpage_readlen(opaque, addr, 1);
3543}
3544
c227f099 3545static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3546 uint32_t value)
3547{
db7b5426
BS
3548 subpage_writelen(opaque, addr, value, 1);
3549}
3550
c227f099 3551static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3552{
db7b5426
BS
3553 return subpage_readlen(opaque, addr, 2);
3554}
3555
f6405247
RH
3556static void subpage_writel (void *opaque, target_phys_addr_t addr,
3557 uint32_t value)
db7b5426 3558{
db7b5426
BS
3559 subpage_writelen(opaque, addr, value, 2);
3560}
3561
d60efc6b 3562static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3563 &subpage_readb,
3564 &subpage_readw,
3565 &subpage_readl,
3566};
3567
d60efc6b 3568static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3569 &subpage_writeb,
3570 &subpage_writew,
3571 &subpage_writel,
3572};
3573
c227f099
AL
3574static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3575 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3576{
3577 int idx, eidx;
3578
3579 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3580 return -1;
3581 idx = SUBPAGE_IDX(start);
3582 eidx = SUBPAGE_IDX(end);
3583#if defined(DEBUG_SUBPAGE)
0bf9e31a 3584 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3585 mmio, start, end, idx, eidx, memory);
3586#endif
95c318f5
GN
3587 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3588 memory = IO_MEM_UNASSIGNED;
f6405247 3589 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3590 for (; idx <= eidx; idx++) {
f6405247
RH
3591 mmio->sub_io_index[idx] = memory;
3592 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3593 }
3594
3595 return 0;
3596}
3597
f6405247
RH
3598static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3599 ram_addr_t orig_memory,
3600 ram_addr_t region_offset)
db7b5426 3601{
c227f099 3602 subpage_t *mmio;
db7b5426
BS
3603 int subpage_memory;
3604
c227f099 3605 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3606
3607 mmio->base = base;
2507c12a
AG
3608 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3609 DEVICE_NATIVE_ENDIAN);
db7b5426 3610#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3611 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3612 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3613#endif
1eec614b 3614 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3615 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3616
3617 return mmio;
3618}
3619
88715657
AL
3620static int get_free_io_mem_idx(void)
3621{
3622 int i;
3623
3624 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3625 if (!io_mem_used[i]) {
3626 io_mem_used[i] = 1;
3627 return i;
3628 }
c6703b47 3629 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3630 return -1;
3631}
3632
dd310534
AG
3633/*
3634 * Usually, devices operate in little endian mode. There are devices out
 3635 * there that operate in big endian too. Each device gets byte-swapped
 3636 * MMIO if plugged onto a CPU of the other endianness.
3637 *
3638 * CPU Device swap?
3639 *
3640 * little little no
3641 * little big yes
3642 * big little yes
3643 * big big no
3644 */
3645
3646typedef struct SwapEndianContainer {
3647 CPUReadMemoryFunc *read[3];
3648 CPUWriteMemoryFunc *write[3];
3649 void *opaque;
3650} SwapEndianContainer;
3651
3652static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3653{
3654 uint32_t val;
3655 SwapEndianContainer *c = opaque;
3656 val = c->read[0](c->opaque, addr);
3657 return val;
3658}
3659
3660static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3661{
3662 uint32_t val;
3663 SwapEndianContainer *c = opaque;
3664 val = bswap16(c->read[1](c->opaque, addr));
3665 return val;
3666}
3667
3668static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3669{
3670 uint32_t val;
3671 SwapEndianContainer *c = opaque;
3672 val = bswap32(c->read[2](c->opaque, addr));
3673 return val;
3674}
3675
3676static CPUReadMemoryFunc * const swapendian_readfn[3]={
3677 swapendian_mem_readb,
3678 swapendian_mem_readw,
3679 swapendian_mem_readl
3680};
3681
3682static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3683 uint32_t val)
3684{
3685 SwapEndianContainer *c = opaque;
3686 c->write[0](c->opaque, addr, val);
3687}
3688
3689static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3690 uint32_t val)
3691{
3692 SwapEndianContainer *c = opaque;
3693 c->write[1](c->opaque, addr, bswap16(val));
3694}
3695
3696static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3697 uint32_t val)
3698{
3699 SwapEndianContainer *c = opaque;
3700 c->write[2](c->opaque, addr, bswap32(val));
3701}
3702
3703static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3704 swapendian_mem_writeb,
3705 swapendian_mem_writew,
3706 swapendian_mem_writel
3707};
3708
3709static void swapendian_init(int io_index)
3710{
3711 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3712 int i;
3713
3714 /* Swap mmio for big endian targets */
3715 c->opaque = io_mem_opaque[io_index];
3716 for (i = 0; i < 3; i++) {
3717 c->read[i] = io_mem_read[io_index][i];
3718 c->write[i] = io_mem_write[io_index][i];
3719
3720 io_mem_read[io_index][i] = swapendian_readfn[i];
3721 io_mem_write[io_index][i] = swapendian_writefn[i];
3722 }
3723 io_mem_opaque[io_index] = c;
3724}
3725
3726static void swapendian_del(int io_index)
3727{
3728 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3729 qemu_free(io_mem_opaque[io_index]);
3730 }
3731}
3732
33417e70
FB
3733/* mem_read and mem_write are arrays of functions containing the
3734 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3735 2). Functions can be omitted with a NULL function pointer.
3ee89922 3736 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3737 modified. If it is zero, a new io zone is allocated. The return
3738 value can be used with cpu_register_physical_memory(). (-1) is
 3739 returned on error. */
1eed09cb 3740static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3741 CPUReadMemoryFunc * const *mem_read,
3742 CPUWriteMemoryFunc * const *mem_write,
dd310534 3743 void *opaque, enum device_endian endian)
33417e70 3744{
3cab721d
RH
3745 int i;
3746
33417e70 3747 if (io_index <= 0) {
88715657
AL
3748 io_index = get_free_io_mem_idx();
3749 if (io_index == -1)
3750 return io_index;
33417e70 3751 } else {
1eed09cb 3752 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3753 if (io_index >= IO_MEM_NB_ENTRIES)
3754 return -1;
3755 }
b5ff1b31 3756
3cab721d
RH
3757 for (i = 0; i < 3; ++i) {
3758 io_mem_read[io_index][i]
3759 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3760 }
3761 for (i = 0; i < 3; ++i) {
3762 io_mem_write[io_index][i]
3763 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3764 }
a4193c8a 3765 io_mem_opaque[io_index] = opaque;
f6405247 3766
dd310534
AG
3767 switch (endian) {
3768 case DEVICE_BIG_ENDIAN:
3769#ifndef TARGET_WORDS_BIGENDIAN
3770 swapendian_init(io_index);
3771#endif
3772 break;
3773 case DEVICE_LITTLE_ENDIAN:
3774#ifdef TARGET_WORDS_BIGENDIAN
3775 swapendian_init(io_index);
3776#endif
3777 break;
3778 case DEVICE_NATIVE_ENDIAN:
3779 default:
3780 break;
3781 }
3782
f6405247 3783 return (io_index << IO_MEM_SHIFT);
33417e70 3784}
61382a50 3785
d60efc6b
BS
3786int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3787 CPUWriteMemoryFunc * const *mem_write,
dd310534 3788 void *opaque, enum device_endian endian)
1eed09cb 3789{
2507c12a 3790 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3791}
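
/* Usage sketch (illustration only, not part of the upstream code):
 * registering a little-endian MMIO region through the dispatch tables above.
 * The demo_* names and the register value are invented for this example;
 * cpu_register_physical_memory() is assumed to be the usual helper from
 * cpu-common.h.  NULL entries in the handler tables fall back to
 * unassigned_mem_read/write, as cpu_register_io_memory_fixed() shows. */
static uint32_t demo_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    return 0x12345678;                  /* device register value */
}

static void demo_mmio_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    /* latch val into device state */
}

static CPUReadMemoryFunc * const demo_mmio_read[3] = {
    NULL,                               /* byte access left unassigned */
    NULL,                               /* word access left unassigned */
    demo_mmio_readl,
};

static CPUWriteMemoryFunc * const demo_mmio_write[3] = {
    NULL,
    NULL,
    demo_mmio_writel,
};

static void demo_mmio_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(demo_mmio_read, demo_mmio_write, NULL,
                                    DEVICE_LITTLE_ENDIAN);
    /* On a big-endian target the handlers were wrapped by swapendian_init(),
       so the guest still sees a little-endian device. */
    cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
}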
3792
88715657
AL
3793void cpu_unregister_io_memory(int io_table_address)
3794{
3795 int i;
3796 int io_index = io_table_address >> IO_MEM_SHIFT;
3797
dd310534
AG
3798 swapendian_del(io_index);
3799
88715657
AL
3800 for (i=0;i < 3; i++) {
3801 io_mem_read[io_index][i] = unassigned_mem_read[i];
3802 io_mem_write[io_index][i] = unassigned_mem_write[i];
3803 }
3804 io_mem_opaque[io_index] = NULL;
3805 io_mem_used[io_index] = 0;
3806}
3807
e9179ce1
AK
3808static void io_mem_init(void)
3809{
3810 int i;
3811
2507c12a
AG
3812 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3813 unassigned_mem_write, NULL,
3814 DEVICE_NATIVE_ENDIAN);
3815 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3816 unassigned_mem_write, NULL,
3817 DEVICE_NATIVE_ENDIAN);
3818 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3819 notdirty_mem_write, NULL,
3820 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3821 for (i=0; i<5; i++)
3822 io_mem_used[i] = 1;
3823
3824 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3825 watch_mem_write, NULL,
3826 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3827}
3828
62152b8a
AK
3829static void memory_map_init(void)
3830{
3831 system_memory = qemu_malloc(sizeof(*system_memory));
8417cebf 3832 memory_region_init(system_memory, "system", INT64_MAX);
62152b8a 3833 set_system_memory_map(system_memory);
309cb471
AK
3834
3835 system_io = qemu_malloc(sizeof(*system_io));
3836 memory_region_init(system_io, "io", 65536);
3837 set_system_io_map(system_io);
62152b8a
AK
3838}
3839
3840MemoryRegion *get_system_memory(void)
3841{
3842 return system_memory;
3843}
3844
309cb471
AK
3845MemoryRegion *get_system_io(void)
3846{
3847 return system_io;
3848}
3849
e2eef170
PB
3850#endif /* !defined(CONFIG_USER_ONLY) */
3851
13eb76e0
FB
3852/* physical memory access (slow version, mainly for debug) */
3853#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3854int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3855 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3856{
3857 int l, flags;
3858 target_ulong page;
53a5960a 3859 void * p;
13eb76e0
FB
3860
3861 while (len > 0) {
3862 page = addr & TARGET_PAGE_MASK;
3863 l = (page + TARGET_PAGE_SIZE) - addr;
3864 if (l > len)
3865 l = len;
3866 flags = page_get_flags(page);
3867 if (!(flags & PAGE_VALID))
a68fe89c 3868 return -1;
13eb76e0
FB
3869 if (is_write) {
3870 if (!(flags & PAGE_WRITE))
a68fe89c 3871 return -1;
579a97f7 3872 /* XXX: this code should not depend on lock_user */
72fb7daa 3873 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3874 return -1;
72fb7daa
AJ
3875 memcpy(p, buf, l);
3876 unlock_user(p, addr, l);
13eb76e0
FB
3877 } else {
3878 if (!(flags & PAGE_READ))
a68fe89c 3879 return -1;
579a97f7 3880 /* XXX: this code should not depend on lock_user */
72fb7daa 3881 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3882 return -1;
72fb7daa 3883 memcpy(buf, p, l);
5b257578 3884 unlock_user(p, addr, 0);
13eb76e0
FB
3885 }
3886 len -= l;
3887 buf += l;
3888 addr += l;
3889 }
a68fe89c 3890 return 0;
13eb76e0 3891}
8df1cd07 3892
13eb76e0 3893#else
c227f099 3894void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3895 int len, int is_write)
3896{
3897 int l, io_index;
3898 uint8_t *ptr;
3899 uint32_t val;
c227f099 3900 target_phys_addr_t page;
8ca5692d 3901 ram_addr_t pd;
92e873b9 3902 PhysPageDesc *p;
3b46e624 3903
13eb76e0
FB
3904 while (len > 0) {
3905 page = addr & TARGET_PAGE_MASK;
3906 l = (page + TARGET_PAGE_SIZE) - addr;
3907 if (l > len)
3908 l = len;
92e873b9 3909 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3910 if (!p) {
3911 pd = IO_MEM_UNASSIGNED;
3912 } else {
3913 pd = p->phys_offset;
3914 }
3b46e624 3915
13eb76e0 3916 if (is_write) {
3a7d929e 3917 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3918 target_phys_addr_t addr1 = addr;
13eb76e0 3919 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3920 if (p)
6c2934db 3921 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3922 /* XXX: could force cpu_single_env to NULL to avoid
3923 potential bugs */
6c2934db 3924 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3925 /* 32 bit write access */
c27004ec 3926 val = ldl_p(buf);
6c2934db 3927 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3928 l = 4;
6c2934db 3929 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3930 /* 16 bit write access */
c27004ec 3931 val = lduw_p(buf);
6c2934db 3932 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3933 l = 2;
3934 } else {
1c213d19 3935 /* 8 bit write access */
c27004ec 3936 val = ldub_p(buf);
6c2934db 3937 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3938 l = 1;
3939 }
3940 } else {
8ca5692d 3941 ram_addr_t addr1;
b448f2f3 3942 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3943 /* RAM case */
5579c7f3 3944 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3945 memcpy(ptr, buf, l);
3a7d929e
FB
3946 if (!cpu_physical_memory_is_dirty(addr1)) {
3947 /* invalidate code */
3948 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3949 /* set dirty bit */
f7c11b53
YT
3950 cpu_physical_memory_set_dirty_flags(
3951 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3952 }
050a0ddf 3953 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3954 }
3955 } else {
5fafdf24 3956 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3957 !(pd & IO_MEM_ROMD)) {
c227f099 3958 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3959 /* I/O case */
3960 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3961 if (p)
6c2934db
AJ
3962 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3963 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3964 /* 32 bit read access */
6c2934db 3965 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3966 stl_p(buf, val);
13eb76e0 3967 l = 4;
6c2934db 3968 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3969 /* 16 bit read access */
6c2934db 3970 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3971 stw_p(buf, val);
13eb76e0
FB
3972 l = 2;
3973 } else {
1c213d19 3974 /* 8 bit read access */
6c2934db 3975 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3976 stb_p(buf, val);
13eb76e0
FB
3977 l = 1;
3978 }
3979 } else {
3980 /* RAM case */
050a0ddf
AP
3981 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3982 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3983 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3984 }
3985 }
3986 len -= l;
3987 buf += l;
3988 addr += l;
3989 }
3990}
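
/* Usage sketch (illustration only, not part of the upstream code): copying a
 * buffer to and from guest-physical memory with the cpu_physical_memory_read/
 * write wrappers, which are assumed to be the thin cpu-common.h helpers
 * around cpu_physical_memory_rw().  The address and contents are invented. */
static void demo_phys_copy(target_phys_addr_t gpa)
{
    uint8_t out[16] = "hello, guest";
    uint8_t in[16];

    cpu_physical_memory_write(gpa, out, sizeof(out));  /* host -> guest */
    cpu_physical_memory_read(gpa, in, sizeof(in));     /* guest -> host */
    /* If gpa lands in MMIO rather than RAM, cpu_physical_memory_rw() split
       the transfer into the largest aligned 1/2/4-byte io_mem accesses. */
}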
8df1cd07 3991
d0ecd2aa 3992/* used for ROM loading: can write in RAM and ROM */
c227f099 3993void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3994 const uint8_t *buf, int len)
3995{
3996 int l;
3997 uint8_t *ptr;
c227f099 3998 target_phys_addr_t page;
d0ecd2aa
FB
3999 unsigned long pd;
4000 PhysPageDesc *p;
3b46e624 4001
d0ecd2aa
FB
4002 while (len > 0) {
4003 page = addr & TARGET_PAGE_MASK;
4004 l = (page + TARGET_PAGE_SIZE) - addr;
4005 if (l > len)
4006 l = len;
4007 p = phys_page_find(page >> TARGET_PAGE_BITS);
4008 if (!p) {
4009 pd = IO_MEM_UNASSIGNED;
4010 } else {
4011 pd = p->phys_offset;
4012 }
3b46e624 4013
d0ecd2aa 4014 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
4015 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
4016 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
4017 /* do nothing */
4018 } else {
4019 unsigned long addr1;
4020 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4021 /* ROM/RAM case */
5579c7f3 4022 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 4023 memcpy(ptr, buf, l);
050a0ddf 4024 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
4025 }
4026 len -= l;
4027 buf += l;
4028 addr += l;
4029 }
4030}
4031
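
/* Usage sketch (illustration only, not part of the upstream code): a ROM
 * loader.  For a region registered as IO_MEM_ROM, writes issued through
 * cpu_physical_memory_rw() end up in unassigned_mem_write (see io_mem_init()
 * above) and are effectively dropped, so loaders use
 * cpu_physical_memory_write_rom() instead.  The arguments are invented. */
static void demo_load_rom(target_phys_addr_t rom_base,
                          const uint8_t *blob, int blob_len)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_len);
}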
6d16c2f8
AL
4032typedef struct {
4033 void *buffer;
c227f099
AL
4034 target_phys_addr_t addr;
4035 target_phys_addr_t len;
6d16c2f8
AL
4036} BounceBuffer;
4037
4038static BounceBuffer bounce;
4039
ba223c29
AL
4040typedef struct MapClient {
4041 void *opaque;
4042 void (*callback)(void *opaque);
72cf2d4f 4043 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4044} MapClient;
4045
72cf2d4f
BS
4046static QLIST_HEAD(map_client_list, MapClient) map_client_list
4047 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4048
4049void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4050{
4051 MapClient *client = qemu_malloc(sizeof(*client));
4052
4053 client->opaque = opaque;
4054 client->callback = callback;
72cf2d4f 4055 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4056 return client;
4057}
4058
4059void cpu_unregister_map_client(void *_client)
4060{
4061 MapClient *client = (MapClient *)_client;
4062
72cf2d4f 4063 QLIST_REMOVE(client, link);
34d5e948 4064 qemu_free(client);
ba223c29
AL
4065}
4066
4067static void cpu_notify_map_clients(void)
4068{
4069 MapClient *client;
4070
72cf2d4f
BS
4071 while (!QLIST_EMPTY(&map_client_list)) {
4072 client = QLIST_FIRST(&map_client_list);
ba223c29 4073 client->callback(client->opaque);
34d5e948 4074 cpu_unregister_map_client(client);
ba223c29
AL
4075 }
4076}
4077
6d16c2f8
AL
4078/* Map a physical memory region into a host virtual address.
4079 * May map a subset of the requested range, given by and returned in *plen.
4080 * May return NULL if resources needed to perform the mapping are exhausted.
4081 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4082 * Use cpu_register_map_client() to know when retrying the map operation is
4083 * likely to succeed.
6d16c2f8 4084 */
c227f099
AL
4085void *cpu_physical_memory_map(target_phys_addr_t addr,
4086 target_phys_addr_t *plen,
6d16c2f8
AL
4087 int is_write)
4088{
c227f099 4089 target_phys_addr_t len = *plen;
38bee5dc 4090 target_phys_addr_t todo = 0;
6d16c2f8 4091 int l;
c227f099 4092 target_phys_addr_t page;
6d16c2f8
AL
4093 unsigned long pd;
4094 PhysPageDesc *p;
f15fbc4b 4095 ram_addr_t raddr = RAM_ADDR_MAX;
8ab934f9
SS
4096 ram_addr_t rlen;
4097 void *ret;
6d16c2f8
AL
4098
4099 while (len > 0) {
4100 page = addr & TARGET_PAGE_MASK;
4101 l = (page + TARGET_PAGE_SIZE) - addr;
4102 if (l > len)
4103 l = len;
4104 p = phys_page_find(page >> TARGET_PAGE_BITS);
4105 if (!p) {
4106 pd = IO_MEM_UNASSIGNED;
4107 } else {
4108 pd = p->phys_offset;
4109 }
4110
4111 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4112 if (todo || bounce.buffer) {
6d16c2f8
AL
4113 break;
4114 }
4115 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4116 bounce.addr = addr;
4117 bounce.len = l;
4118 if (!is_write) {
54f7b4a3 4119 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4120 }
38bee5dc
SS
4121
4122 *plen = l;
4123 return bounce.buffer;
6d16c2f8 4124 }
8ab934f9
SS
4125 if (!todo) {
4126 raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4127 }
6d16c2f8
AL
4128
4129 len -= l;
4130 addr += l;
38bee5dc 4131 todo += l;
6d16c2f8 4132 }
8ab934f9
SS
4133 rlen = todo;
4134 ret = qemu_ram_ptr_length(raddr, &rlen);
4135 *plen = rlen;
4136 return ret;
6d16c2f8
AL
4137}
4138
4139/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4140 * Will also mark the memory as dirty if is_write == 1. access_len gives
4141 * the amount of memory that was actually read or written by the caller.
4142 */
c227f099
AL
4143void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4144 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4145{
4146 if (buffer != bounce.buffer) {
4147 if (is_write) {
e890261f 4148 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4149 while (access_len) {
4150 unsigned l;
4151 l = TARGET_PAGE_SIZE;
4152 if (l > access_len)
4153 l = access_len;
4154 if (!cpu_physical_memory_is_dirty(addr1)) {
4155 /* invalidate code */
4156 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4157 /* set dirty bit */
f7c11b53
YT
4158 cpu_physical_memory_set_dirty_flags(
4159 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4160 }
4161 addr1 += l;
4162 access_len -= l;
4163 }
4164 }
868bb33f 4165 if (xen_enabled()) {
e41d7c69 4166 xen_invalidate_map_cache_entry(buffer);
050a0ddf 4167 }
6d16c2f8
AL
4168 return;
4169 }
4170 if (is_write) {
4171 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4172 }
f8a83245 4173 qemu_vfree(bounce.buffer);
6d16c2f8 4174 bounce.buffer = NULL;
ba223c29 4175 cpu_notify_map_clients();
6d16c2f8 4176}
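
/* Usage sketch (illustration only, not part of the upstream code): zero-copy
 * DMA through the map/unmap pair above, with the retry pattern documented
 * for cpu_register_map_client().  All demo_* names are invented. */
static void demo_dma_retry(void *opaque);

static void demo_dma_write(target_phys_addr_t dma_addr,
                           target_phys_addr_t dma_len, const uint8_t *src)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* The bounce buffer is busy: ask to be notified when mapping is
           likely to succeed again, then retry from the callback. */
        cpu_register_map_client(NULL, demo_dma_retry);
        return;
    }
    /* Only plen bytes were mapped; a real caller would loop for the rest. */
    memcpy(host, src, plen);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}

static void demo_dma_retry(void *opaque)
{
    /* re-issue the transfer, e.g. demo_dma_write(...) with the saved args */
}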
d0ecd2aa 4177
8df1cd07 4178/* warning: addr must be aligned */
1e78bcc1
AG
4179static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4180 enum device_endian endian)
8df1cd07
FB
4181{
4182 int io_index;
4183 uint8_t *ptr;
4184 uint32_t val;
4185 unsigned long pd;
4186 PhysPageDesc *p;
4187
4188 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4189 if (!p) {
4190 pd = IO_MEM_UNASSIGNED;
4191 } else {
4192 pd = p->phys_offset;
4193 }
3b46e624 4194
5fafdf24 4195 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4196 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4197 /* I/O case */
4198 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4199 if (p)
4200 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4201 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4202#if defined(TARGET_WORDS_BIGENDIAN)
4203 if (endian == DEVICE_LITTLE_ENDIAN) {
4204 val = bswap32(val);
4205 }
4206#else
4207 if (endian == DEVICE_BIG_ENDIAN) {
4208 val = bswap32(val);
4209 }
4210#endif
8df1cd07
FB
4211 } else {
4212 /* RAM case */
5579c7f3 4213 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4214 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4215 switch (endian) {
4216 case DEVICE_LITTLE_ENDIAN:
4217 val = ldl_le_p(ptr);
4218 break;
4219 case DEVICE_BIG_ENDIAN:
4220 val = ldl_be_p(ptr);
4221 break;
4222 default:
4223 val = ldl_p(ptr);
4224 break;
4225 }
8df1cd07
FB
4226 }
4227 return val;
4228}
4229
1e78bcc1
AG
4230uint32_t ldl_phys(target_phys_addr_t addr)
4231{
4232 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4233}
4234
4235uint32_t ldl_le_phys(target_phys_addr_t addr)
4236{
4237 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4238}
4239
4240uint32_t ldl_be_phys(target_phys_addr_t addr)
4241{
4242 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4243}
4244
84b7b8e7 4245/* warning: addr must be aligned */
1e78bcc1
AG
4246static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4247 enum device_endian endian)
84b7b8e7
FB
4248{
4249 int io_index;
4250 uint8_t *ptr;
4251 uint64_t val;
4252 unsigned long pd;
4253 PhysPageDesc *p;
4254
4255 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4256 if (!p) {
4257 pd = IO_MEM_UNASSIGNED;
4258 } else {
4259 pd = p->phys_offset;
4260 }
3b46e624 4261
2a4188a3
FB
4262 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4263 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4264 /* I/O case */
4265 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4266 if (p)
4267 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4268
4269 /* XXX This is broken when device endian != cpu endian.
4270 Fix and add "endian" variable check */
84b7b8e7
FB
4271#ifdef TARGET_WORDS_BIGENDIAN
4272 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4273 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4274#else
4275 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4276 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4277#endif
4278 } else {
4279 /* RAM case */
5579c7f3 4280 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4281 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4282 switch (endian) {
4283 case DEVICE_LITTLE_ENDIAN:
4284 val = ldq_le_p(ptr);
4285 break;
4286 case DEVICE_BIG_ENDIAN:
4287 val = ldq_be_p(ptr);
4288 break;
4289 default:
4290 val = ldq_p(ptr);
4291 break;
4292 }
84b7b8e7
FB
4293 }
4294 return val;
4295}
4296
1e78bcc1
AG
4297uint64_t ldq_phys(target_phys_addr_t addr)
4298{
4299 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4300}
4301
4302uint64_t ldq_le_phys(target_phys_addr_t addr)
4303{
4304 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4305}
4306
4307uint64_t ldq_be_phys(target_phys_addr_t addr)
4308{
4309 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4310}
4311
aab33094 4312/* XXX: optimize */
c227f099 4313uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4314{
4315 uint8_t val;
4316 cpu_physical_memory_read(addr, &val, 1);
4317 return val;
4318}
4319
733f0b02 4320/* warning: addr must be aligned */
1e78bcc1
AG
4321static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4322 enum device_endian endian)
aab33094 4323{
733f0b02
MT
4324 int io_index;
4325 uint8_t *ptr;
4326 uint64_t val;
4327 unsigned long pd;
4328 PhysPageDesc *p;
4329
4330 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4331 if (!p) {
4332 pd = IO_MEM_UNASSIGNED;
4333 } else {
4334 pd = p->phys_offset;
4335 }
4336
4337 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4338 !(pd & IO_MEM_ROMD)) {
4339 /* I/O case */
4340 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4341 if (p)
4342 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4343 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4344#if defined(TARGET_WORDS_BIGENDIAN)
4345 if (endian == DEVICE_LITTLE_ENDIAN) {
4346 val = bswap16(val);
4347 }
4348#else
4349 if (endian == DEVICE_BIG_ENDIAN) {
4350 val = bswap16(val);
4351 }
4352#endif
733f0b02
MT
4353 } else {
4354 /* RAM case */
4355 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4356 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4357 switch (endian) {
4358 case DEVICE_LITTLE_ENDIAN:
4359 val = lduw_le_p(ptr);
4360 break;
4361 case DEVICE_BIG_ENDIAN:
4362 val = lduw_be_p(ptr);
4363 break;
4364 default:
4365 val = lduw_p(ptr);
4366 break;
4367 }
733f0b02
MT
4368 }
4369 return val;
aab33094
FB
4370}
4371
1e78bcc1
AG
4372uint32_t lduw_phys(target_phys_addr_t addr)
4373{
4374 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4375}
4376
4377uint32_t lduw_le_phys(target_phys_addr_t addr)
4378{
4379 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4380}
4381
4382uint32_t lduw_be_phys(target_phys_addr_t addr)
4383{
4384 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4385}
4386
8df1cd07
FB
 4387/* warning: addr must be aligned. The ram page is not marked as dirty
4388 and the code inside is not invalidated. It is useful if the dirty
4389 bits are used to track modified PTEs */
c227f099 4390void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4391{
4392 int io_index;
4393 uint8_t *ptr;
4394 unsigned long pd;
4395 PhysPageDesc *p;
4396
4397 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4398 if (!p) {
4399 pd = IO_MEM_UNASSIGNED;
4400 } else {
4401 pd = p->phys_offset;
4402 }
3b46e624 4403
3a7d929e 4404 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4405 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4406 if (p)
4407 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4408 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4409 } else {
74576198 4410 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4411 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4412 stl_p(ptr, val);
74576198
AL
4413
4414 if (unlikely(in_migration)) {
4415 if (!cpu_physical_memory_is_dirty(addr1)) {
4416 /* invalidate code */
4417 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4418 /* set dirty bit */
f7c11b53
YT
4419 cpu_physical_memory_set_dirty_flags(
4420 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4421 }
4422 }
8df1cd07
FB
4423 }
4424}
4425
c227f099 4426void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4427{
4428 int io_index;
4429 uint8_t *ptr;
4430 unsigned long pd;
4431 PhysPageDesc *p;
4432
4433 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4434 if (!p) {
4435 pd = IO_MEM_UNASSIGNED;
4436 } else {
4437 pd = p->phys_offset;
4438 }
3b46e624 4439
bc98a7ef
JM
4440 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4441 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4442 if (p)
4443 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4444#ifdef TARGET_WORDS_BIGENDIAN
4445 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4446 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4447#else
4448 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4449 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4450#endif
4451 } else {
5579c7f3 4452 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4453 (addr & ~TARGET_PAGE_MASK);
4454 stq_p(ptr, val);
4455 }
4456}
4457
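
/* Usage sketch (illustration only, not part of the upstream code): updating a
 * page-table entry in guest RAM without marking the page dirty, the use case
 * described above stl_phys_notdirty().  The PTE address and the bit value
 * are invented; real targets use their own PTE layouts. */
static void demo_set_pte_dirty_bit(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x40;                        /* hypothetical "dirty" PTE bit */
    /* stl_phys() would also invalidate TBs and set the dirty flags for this
       page; stl_phys_notdirty() skips that bookkeeping (except while
       migration is tracking dirty pages). */
    stl_phys_notdirty(pte_addr, pte);
}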
8df1cd07 4458/* warning: addr must be aligned */
1e78bcc1
AG
4459static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4460 enum device_endian endian)
8df1cd07
FB
4461{
4462 int io_index;
4463 uint8_t *ptr;
4464 unsigned long pd;
4465 PhysPageDesc *p;
4466
4467 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4468 if (!p) {
4469 pd = IO_MEM_UNASSIGNED;
4470 } else {
4471 pd = p->phys_offset;
4472 }
3b46e624 4473
3a7d929e 4474 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4475 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4476 if (p)
4477 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4478#if defined(TARGET_WORDS_BIGENDIAN)
4479 if (endian == DEVICE_LITTLE_ENDIAN) {
4480 val = bswap32(val);
4481 }
4482#else
4483 if (endian == DEVICE_BIG_ENDIAN) {
4484 val = bswap32(val);
4485 }
4486#endif
8df1cd07
FB
4487 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4488 } else {
4489 unsigned long addr1;
4490 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4491 /* RAM case */
5579c7f3 4492 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4493 switch (endian) {
4494 case DEVICE_LITTLE_ENDIAN:
4495 stl_le_p(ptr, val);
4496 break;
4497 case DEVICE_BIG_ENDIAN:
4498 stl_be_p(ptr, val);
4499 break;
4500 default:
4501 stl_p(ptr, val);
4502 break;
4503 }
3a7d929e
FB
4504 if (!cpu_physical_memory_is_dirty(addr1)) {
4505 /* invalidate code */
4506 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4507 /* set dirty bit */
f7c11b53
YT
4508 cpu_physical_memory_set_dirty_flags(addr1,
4509 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4510 }
8df1cd07
FB
4511 }
4512}
4513
1e78bcc1
AG
4514void stl_phys(target_phys_addr_t addr, uint32_t val)
4515{
4516 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4517}
4518
4519void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4520{
4521 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4522}
4523
4524void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4525{
4526 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4527}
4528
aab33094 4529/* XXX: optimize */
c227f099 4530void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4531{
4532 uint8_t v = val;
4533 cpu_physical_memory_write(addr, &v, 1);
4534}
4535
733f0b02 4536/* warning: addr must be aligned */
1e78bcc1
AG
4537static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4538 enum device_endian endian)
aab33094 4539{
733f0b02
MT
4540 int io_index;
4541 uint8_t *ptr;
4542 unsigned long pd;
4543 PhysPageDesc *p;
4544
4545 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4546 if (!p) {
4547 pd = IO_MEM_UNASSIGNED;
4548 } else {
4549 pd = p->phys_offset;
4550 }
4551
4552 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4553 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4554 if (p)
4555 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4556#if defined(TARGET_WORDS_BIGENDIAN)
4557 if (endian == DEVICE_LITTLE_ENDIAN) {
4558 val = bswap16(val);
4559 }
4560#else
4561 if (endian == DEVICE_BIG_ENDIAN) {
4562 val = bswap16(val);
4563 }
4564#endif
733f0b02
MT
4565 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4566 } else {
4567 unsigned long addr1;
4568 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4569 /* RAM case */
4570 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4571 switch (endian) {
4572 case DEVICE_LITTLE_ENDIAN:
4573 stw_le_p(ptr, val);
4574 break;
4575 case DEVICE_BIG_ENDIAN:
4576 stw_be_p(ptr, val);
4577 break;
4578 default:
4579 stw_p(ptr, val);
4580 break;
4581 }
733f0b02
MT
4582 if (!cpu_physical_memory_is_dirty(addr1)) {
4583 /* invalidate code */
4584 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4585 /* set dirty bit */
4586 cpu_physical_memory_set_dirty_flags(addr1,
4587 (0xff & ~CODE_DIRTY_FLAG));
4588 }
4589 }
aab33094
FB
4590}
4591
1e78bcc1
AG
4592void stw_phys(target_phys_addr_t addr, uint32_t val)
4593{
4594 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4595}
4596
4597void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4598{
4599 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4600}
4601
4602void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4603{
4604 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4605}
4606
aab33094 4607/* XXX: optimize */
c227f099 4608void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4609{
4610 val = tswap64(val);
71d2b725 4611 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4612}
4613
1e78bcc1
AG
4614void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4615{
4616 val = cpu_to_le64(val);
4617 cpu_physical_memory_write(addr, &val, 8);
4618}
4619
4620void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4621{
4622 val = cpu_to_be64(val);
4623 cpu_physical_memory_write(addr, &val, 8);
4624}
4625
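
/* Usage sketch (illustration only, not part of the upstream code): the
 * explicit-endian accessors above read and write guest-physical words with a
 * fixed byte order, regardless of TARGET_WORDS_BIGENDIAN.  The register
 * addresses are invented for this example. */
static uint32_t demo_read_le_word(target_phys_addr_t reg_addr)
{
    /* Little-endian load: same result on little- and big-endian targets,
       unlike ldl_phys(), which follows the target's native order. */
    return ldl_le_phys(reg_addr);
}

static void demo_write_be_word(target_phys_addr_t reg_addr, uint32_t val)
{
    /* Big-endian store, byte swapped as needed for the target. */
    stl_be_phys(reg_addr, val);
}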
5e2972fd 4626/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4627int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4628 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4629{
4630 int l;
c227f099 4631 target_phys_addr_t phys_addr;
9b3c35e0 4632 target_ulong page;
13eb76e0
FB
4633
4634 while (len > 0) {
4635 page = addr & TARGET_PAGE_MASK;
4636 phys_addr = cpu_get_phys_page_debug(env, page);
4637 /* if no physical page mapped, return an error */
4638 if (phys_addr == -1)
4639 return -1;
4640 l = (page + TARGET_PAGE_SIZE) - addr;
4641 if (l > len)
4642 l = len;
5e2972fd 4643 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4644 if (is_write)
4645 cpu_physical_memory_write_rom(phys_addr, buf, l);
4646 else
5e2972fd 4647 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4648 len -= l;
4649 buf += l;
4650 addr += l;
4651 }
4652 return 0;
4653}
a68fe89c 4654#endif
13eb76e0 4655
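
/* Usage sketch (illustration only, not part of the upstream code): reading
 * guest *virtual* memory the way the gdb stub does.  The demo_* name is
 * invented; cpu_memory_rw_debug() is the function defined above. */
static int demo_peek_guest_vaddr(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    /* In the system-emulation build this translates page by page with
       cpu_get_phys_page_debug(); the user-mode variant checks page flags
       instead.  Returns -1 if any page is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}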
2e70f6ef
PB
4656/* in deterministic execution mode, instructions doing device I/Os
4657 must be at the end of the TB */
4658void cpu_io_recompile(CPUState *env, void *retaddr)
4659{
4660 TranslationBlock *tb;
4661 uint32_t n, cflags;
4662 target_ulong pc, cs_base;
4663 uint64_t flags;
4664
4665 tb = tb_find_pc((unsigned long)retaddr);
4666 if (!tb) {
4667 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4668 retaddr);
4669 }
4670 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4671 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4672 /* Calculate how many instructions had been executed before the fault
bf20dc07 4673 occurred. */
2e70f6ef
PB
4674 n = n - env->icount_decr.u16.low;
4675 /* Generate a new TB ending on the I/O insn. */
4676 n++;
4677 /* On MIPS and SH, delay slot instructions can only be restarted if
4678 they were already the first instruction in the TB. If this is not
bf20dc07 4679 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4680 branch. */
4681#if defined(TARGET_MIPS)
4682 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4683 env->active_tc.PC -= 4;
4684 env->icount_decr.u16.low++;
4685 env->hflags &= ~MIPS_HFLAG_BMASK;
4686 }
4687#elif defined(TARGET_SH4)
4688 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4689 && n > 1) {
4690 env->pc -= 2;
4691 env->icount_decr.u16.low++;
4692 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4693 }
4694#endif
4695 /* This should never happen. */
4696 if (n > CF_COUNT_MASK)
4697 cpu_abort(env, "TB too big during recompile");
4698
4699 cflags = n | CF_LAST_IO;
4700 pc = tb->pc;
4701 cs_base = tb->cs_base;
4702 flags = tb->flags;
4703 tb_phys_invalidate(tb, -1);
4704 /* FIXME: In theory this could raise an exception. In practice
4705 we have already translated the block once so it's probably ok. */
4706 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4707 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4708 the first in the TB) then we end up generating a whole new TB and
4709 repeating the fault, which is horribly inefficient.
4710 Better would be to execute just this insn uncached, or generate a
4711 second new TB. */
4712 cpu_resume_from_signal(env, NULL);
4713}
4714
b3755a91
PB
4715#if !defined(CONFIG_USER_ONLY)
4716
055403b2 4717void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4718{
4719 int i, target_code_size, max_target_code_size;
4720 int direct_jmp_count, direct_jmp2_count, cross_page;
4721 TranslationBlock *tb;
3b46e624 4722
e3db7226
FB
4723 target_code_size = 0;
4724 max_target_code_size = 0;
4725 cross_page = 0;
4726 direct_jmp_count = 0;
4727 direct_jmp2_count = 0;
4728 for(i = 0; i < nb_tbs; i++) {
4729 tb = &tbs[i];
4730 target_code_size += tb->size;
4731 if (tb->size > max_target_code_size)
4732 max_target_code_size = tb->size;
4733 if (tb->page_addr[1] != -1)
4734 cross_page++;
4735 if (tb->tb_next_offset[0] != 0xffff) {
4736 direct_jmp_count++;
4737 if (tb->tb_next_offset[1] != 0xffff) {
4738 direct_jmp2_count++;
4739 }
4740 }
4741 }
4742 /* XXX: avoid using doubles ? */
57fec1fe 4743 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4744 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4745 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4746 cpu_fprintf(f, "TB count %d/%d\n",
4747 nb_tbs, code_gen_max_blocks);
5fafdf24 4748 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4749 nb_tbs ? target_code_size / nb_tbs : 0,
4750 max_target_code_size);
055403b2 4751 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4752 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4753 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4754 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4755 cross_page,
e3db7226
FB
4756 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4757 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4758 direct_jmp_count,
e3db7226
FB
4759 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4760 direct_jmp2_count,
4761 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4762 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4763 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4764 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4765 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4766 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4767}
4768
61382a50
FB
4769#define MMUSUFFIX _cmmu
4770#define GETPC() NULL
4771#define env cpu_single_env
b769d8fe 4772#define SOFTMMU_CODE_ACCESS
61382a50
FB
4773
4774#define SHIFT 0
4775#include "softmmu_template.h"
4776
4777#define SHIFT 1
4778#include "softmmu_template.h"
4779
4780#define SHIFT 2
4781#include "softmmu_template.h"
4782
4783#define SHIFT 3
4784#include "softmmu_template.h"
4785
4786#undef env
4787
4788#endif