/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

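/* Illustrative note (not part of the original sources): e.g. with
   TARGET_PAGE_BITS == 12, L2_BITS == 10 and a 32-bit
   L1_MAP_ADDR_SPACE_BITS, a virtual page index has 20 significant bits;
   the top V_L1_BITS (10) select the l1_map entry and the remaining 10
   bits index the leaf table of PageDesc entries.  Wider address spaces
   simply insert additional L2_BITS-wide intermediate levels. */
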
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

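/* make the given host memory range executable (read/write/execute) so
   that generated code placed there can be run */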
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

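/* compute the host page size/bits/mask and, in BSD user mode, mark the
   ranges already mapped in the host process as PAGE_RESERVED so that the
   guest cannot map over them */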
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

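/* look up the PageDesc for guest page number 'index' in the multi-level
   l1_map; when 'alloc' is non-zero, missing intermediate tables and the
   leaf PageDesc array are allocated on the way down */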
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
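/* same lookup as page_find_alloc() but on the physical address map
   (l1_phys_map); newly allocated leaf entries start out as
   IO_MEM_UNASSIGNED */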
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

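/* allocate the host buffer that receives generated code; the mapping
   honours per-host placement constraints so that direct branches from
   translated code can reach the prologue and other blocks */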
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

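/* return the CPUState whose cpu_index matches 'cpu', or NULL if there is
   no such CPU */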
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

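/* register a new CPU: assign the next cpu_index, append it to the global
   CPU list, initialise its breakpoint/watchpoint queues and, in system
   mode, register its savevm/vmstate handlers */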
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

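/* invalidate one TB: remove it from the physical hash table, from the
   page lists of the (up to two) pages it spans, from every CPU's
   tb_jmp_cache, and unlink it from the jump chains of any TB that
   branches to it.  'page_addr' is the page whose TB list the caller is
   already rebuilding (its entry is skipped), or -1. */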
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

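/* set bits [start, start + len) in the byte-array bitmap 'tab' */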
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

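/* rebuild the code bitmap of a page from its list of TBs: every bit that
   is set marks a byte of the page covered by translated code */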
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

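/* generate a new TB for the guest code starting at (pc, cs_base, flags);
   if the TB pool or the code buffer is exhausted, everything is flushed
   and the allocation retried */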
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

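/* unchain the TB that 'env' is currently executing so that the CPU
   returns to the main loop as soon as possible */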
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

b54ad049
FB
1678void cpu_reset_interrupt(CPUState *env, int mask)
1679{
1680 env->interrupt_request &= ~mask;
1681}
1682
3098dba0
AJ
1683void cpu_exit(CPUState *env)
1684{
1685 env->exit_request = 1;
1686 cpu_unlink_tb(env);
1687}
1688
c7cd6a37 1689const CPULogItem cpu_log_items[] = {
5fafdf24 1690 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1691 "show generated host assembly code for each compiled TB" },
1692 { CPU_LOG_TB_IN_ASM, "in_asm",
1693 "show target assembly code for each compiled TB" },
5fafdf24 1694 { CPU_LOG_TB_OP, "op",
57fec1fe 1695 "show micro ops for each compiled TB" },
f193c797 1696 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1697 "show micro ops "
1698#ifdef TARGET_I386
1699 "before eflags optimization and "
f193c797 1700#endif
e01a1157 1701 "after liveness analysis" },
f193c797
FB
1702 { CPU_LOG_INT, "int",
1703 "show interrupts/exceptions in short format" },
1704 { CPU_LOG_EXEC, "exec",
1705 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1706 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1707 "show CPU state before block translation" },
f193c797
FB
1708#ifdef TARGET_I386
1709 { CPU_LOG_PCALL, "pcall",
1710 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1711 { CPU_LOG_RESET, "cpu_reset",
1712 "show CPU state before CPU resets" },
f193c797 1713#endif
8e3a9fd2 1714#ifdef DEBUG_IOPORT
fd872598
FB
1715 { CPU_LOG_IOPORT, "ioport",
1716 "show all i/o ports accesses" },
8e3a9fd2 1717#endif
f193c797
FB
1718 { 0, NULL, NULL },
1719};
1720
f6f3fbca
MT
1721#ifndef CONFIG_USER_ONLY
1722static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1723 = QLIST_HEAD_INITIALIZER(memory_client_list);
1724
1725static void cpu_notify_set_memory(target_phys_addr_t start_addr,
9742bf26 1726 ram_addr_t size,
0fd542fb
MT
1727 ram_addr_t phys_offset,
1728 bool log_dirty)
f6f3fbca
MT
1729{
1730 CPUPhysMemoryClient *client;
1731 QLIST_FOREACH(client, &memory_client_list, list) {
0fd542fb 1732 client->set_memory(client, start_addr, size, phys_offset, log_dirty);
f6f3fbca
MT
1733 }
1734}
1735
1736static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
9742bf26 1737 target_phys_addr_t end)
f6f3fbca
MT
1738{
1739 CPUPhysMemoryClient *client;
1740 QLIST_FOREACH(client, &memory_client_list, list) {
1741 int r = client->sync_dirty_bitmap(client, start, end);
1742 if (r < 0)
1743 return r;
1744 }
1745 return 0;
1746}
1747
1748static int cpu_notify_migration_log(int enable)
1749{
1750 CPUPhysMemoryClient *client;
1751 QLIST_FOREACH(client, &memory_client_list, list) {
1752 int r = client->migration_log(client, enable);
1753 if (r < 0)
1754 return r;
1755 }
1756 return 0;
1757}
1758
2173a75f
AW
1759struct last_map {
1760 target_phys_addr_t start_addr;
1761 ram_addr_t size;
1762 ram_addr_t phys_offset;
1763};
1764
8d4c78e7
AW
1765/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
1766 * address. Each intermediate table provides the next L2_BITs of guest
 1767 * physical address space. The number of levels varies based on host and
1768 * guest configuration, making it efficient to build the final guest
1769 * physical address by seeding the L1 offset and shifting and adding in
1770 * each L2 offset as we recurse through them. */
2173a75f
AW
1771static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
1772 void **lp, target_phys_addr_t addr,
1773 struct last_map *map)
f6f3fbca 1774{
5cd2c5b6 1775 int i;
f6f3fbca 1776
5cd2c5b6
RH
1777 if (*lp == NULL) {
1778 return;
1779 }
1780 if (level == 0) {
1781 PhysPageDesc *pd = *lp;
8d4c78e7 1782 addr <<= L2_BITS + TARGET_PAGE_BITS;
7296abac 1783 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1784 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
2173a75f
AW
1785 target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;
1786
1787 if (map->size &&
1788 start_addr == map->start_addr + map->size &&
1789 pd[i].phys_offset == map->phys_offset + map->size) {
1790
1791 map->size += TARGET_PAGE_SIZE;
1792 continue;
1793 } else if (map->size) {
1794 client->set_memory(client, map->start_addr,
1795 map->size, map->phys_offset, false);
1796 }
1797
1798 map->start_addr = start_addr;
1799 map->size = TARGET_PAGE_SIZE;
1800 map->phys_offset = pd[i].phys_offset;
f6f3fbca 1801 }
5cd2c5b6
RH
1802 }
1803 } else {
1804 void **pp = *lp;
7296abac 1805 for (i = 0; i < L2_SIZE; ++i) {
8d4c78e7 1806 phys_page_for_each_1(client, level - 1, pp + i,
2173a75f 1807 (addr << L2_BITS) | i, map);
f6f3fbca
MT
1808 }
1809 }
1810}
1811
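/* Worked example of the address reconstruction described above (illustrative;
 * the real constants depend on the build).  With one intermediate level
 * between l1_phys_map and the leaf PhysPageDesc array, an L1 index i1, an
 * intermediate L2 index i2 and a leaf index i3 combine as follows:
 *     addr       = (i1 << L2_BITS) | i2                  (intermediate level)
 *     addr       = addr << (L2_BITS + TARGET_PAGE_BITS)  (leaf level)
 *     start_addr = addr | (i3 << TARGET_PAGE_BITS)
 * i.e. the guest physical page address is
 *     ((((i1 << L2_BITS) | i2) << L2_BITS) | i3) << TARGET_PAGE_BITS. */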
1812static void phys_page_for_each(CPUPhysMemoryClient *client)
1813{
5cd2c5b6 1814 int i;
2173a75f
AW
1815 struct last_map map = { };
1816
5cd2c5b6
RH
1817 for (i = 0; i < P_L1_SIZE; ++i) {
1818 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
2173a75f
AW
1819 l1_phys_map + i, i, &map);
1820 }
1821 if (map.size) {
1822 client->set_memory(client, map.start_addr, map.size, map.phys_offset,
1823 false);
f6f3fbca 1824 }
f6f3fbca
MT
1825}
1826
1827void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1828{
1829 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1830 phys_page_for_each(client);
1831}
1832
1833void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1834{
1835 QLIST_REMOVE(client, list);
1836}
1837#endif
1838
f193c797
FB
1839static int cmp1(const char *s1, int n, const char *s2)
1840{
1841 if (strlen(s2) != n)
1842 return 0;
1843 return memcmp(s1, s2, n) == 0;
1844}
3b46e624 1845
f193c797
FB
1846/* takes a comma separated list of log masks. Return 0 if error. */
1847int cpu_str_to_log_mask(const char *str)
1848{
c7cd6a37 1849 const CPULogItem *item;
f193c797
FB
1850 int mask;
1851 const char *p, *p1;
1852
1853 p = str;
1854 mask = 0;
1855 for(;;) {
1856 p1 = strchr(p, ',');
1857 if (!p1)
1858 p1 = p + strlen(p);
9742bf26
YT
1859 if(cmp1(p,p1-p,"all")) {
1860 for(item = cpu_log_items; item->mask != 0; item++) {
1861 mask |= item->mask;
1862 }
1863 } else {
1864 for(item = cpu_log_items; item->mask != 0; item++) {
1865 if (cmp1(p, p1 - p, item->name))
1866 goto found;
1867 }
1868 return 0;
f193c797 1869 }
f193c797
FB
1870 found:
1871 mask |= item->mask;
1872 if (*p1 != ',')
1873 break;
1874 p = p1 + 1;
1875 }
1876 return mask;
1877}
ea041c0e 1878
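/* Illustrative usage sketch (not part of exec.c): turning a comma-separated
 * "-d" style option string into a log mask with the parser above.  The
 * helper name is hypothetical and the cpu_set_log() call is assumed, so the
 * snippet is kept under #if 0. */
#if 0
static void example_enable_logging(const char *optarg)
{
    int mask = cpu_str_to_log_mask(optarg);   /* e.g. "in_asm,exec" */
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", optarg);
        exit(1);
    }
    cpu_set_log(mask);                        /* assumed logging entry point */
}
#endif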
7501267e
FB
1879void cpu_abort(CPUState *env, const char *fmt, ...)
1880{
1881 va_list ap;
493ae1f0 1882 va_list ap2;
7501267e
FB
1883
1884 va_start(ap, fmt);
493ae1f0 1885 va_copy(ap2, ap);
7501267e
FB
1886 fprintf(stderr, "qemu: fatal: ");
1887 vfprintf(stderr, fmt, ap);
1888 fprintf(stderr, "\n");
1889#ifdef TARGET_I386
7fe48483
FB
1890 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1891#else
1892 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1893#endif
93fcfe39
AL
1894 if (qemu_log_enabled()) {
1895 qemu_log("qemu: fatal: ");
1896 qemu_log_vprintf(fmt, ap2);
1897 qemu_log("\n");
f9373291 1898#ifdef TARGET_I386
93fcfe39 1899 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1900#else
93fcfe39 1901 log_cpu_state(env, 0);
f9373291 1902#endif
31b1a7b4 1903 qemu_log_flush();
93fcfe39 1904 qemu_log_close();
924edcae 1905 }
493ae1f0 1906 va_end(ap2);
f9373291 1907 va_end(ap);
fd052bf6
RV
1908#if defined(CONFIG_USER_ONLY)
1909 {
1910 struct sigaction act;
1911 sigfillset(&act.sa_mask);
1912 act.sa_handler = SIG_DFL;
1913 sigaction(SIGABRT, &act, NULL);
1914 }
1915#endif
7501267e
FB
1916 abort();
1917}
1918
c5be9f08
TS
1919CPUState *cpu_copy(CPUState *env)
1920{
01ba9816 1921 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1922 CPUState *next_cpu = new_env->next_cpu;
1923 int cpu_index = new_env->cpu_index;
5a38f081
AL
1924#if defined(TARGET_HAS_ICE)
1925 CPUBreakpoint *bp;
1926 CPUWatchpoint *wp;
1927#endif
1928
c5be9f08 1929 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1930
1931 /* Preserve chaining and index. */
c5be9f08
TS
1932 new_env->next_cpu = next_cpu;
1933 new_env->cpu_index = cpu_index;
5a38f081
AL
1934
1935 /* Clone all break/watchpoints.
1936 Note: Once we support ptrace with hw-debug register access, make sure
1937 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1938 QTAILQ_INIT(&env->breakpoints);
1939 QTAILQ_INIT(&env->watchpoints);
5a38f081 1940#if defined(TARGET_HAS_ICE)
72cf2d4f 1941 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1942 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1943 }
72cf2d4f 1944 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1945 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1946 wp->flags, NULL);
1947 }
1948#endif
1949
c5be9f08
TS
1950 return new_env;
1951}
1952
0124311e
FB
1953#if !defined(CONFIG_USER_ONLY)
1954
5c751e99
EI
1955static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1956{
1957 unsigned int i;
1958
1959 /* Discard jump cache entries for any tb which might potentially
1960 overlap the flushed page. */
1961 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1962 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1963 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1964
1965 i = tb_jmp_cache_hash_page(addr);
1966 memset (&env->tb_jmp_cache[i], 0,
9742bf26 1967 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
5c751e99
EI
1968}
1969
08738984
IK
1970static CPUTLBEntry s_cputlb_empty_entry = {
1971 .addr_read = -1,
1972 .addr_write = -1,
1973 .addr_code = -1,
1974 .addend = -1,
1975};
1976
ee8b7021
FB
1977/* NOTE: if flush_global is true, also flush global entries (not
1978 implemented yet) */
1979void tlb_flush(CPUState *env, int flush_global)
33417e70 1980{
33417e70 1981 int i;
0124311e 1982
9fa3e853
FB
1983#if defined(DEBUG_TLB)
1984 printf("tlb_flush:\n");
1985#endif
0124311e
FB
1986 /* must reset current TB so that interrupts cannot modify the
1987 links while we are modifying them */
1988 env->current_tb = NULL;
1989
33417e70 1990 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1991 int mmu_idx;
1992 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1993 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1994 }
33417e70 1995 }
9fa3e853 1996
8a40a180 1997 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1998
d4c430a8
PB
1999 env->tlb_flush_addr = -1;
2000 env->tlb_flush_mask = 0;
e3db7226 2001 tlb_flush_count++;
33417e70
FB
2002}
2003
274da6b2 2004static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 2005{
5fafdf24 2006 if (addr == (tlb_entry->addr_read &
84b7b8e7 2007 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2008 addr == (tlb_entry->addr_write &
84b7b8e7 2009 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 2010 addr == (tlb_entry->addr_code &
84b7b8e7 2011 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 2012 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 2013 }
61382a50
FB
2014}
2015
2e12669a 2016void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 2017{
8a40a180 2018 int i;
cfde4bd9 2019 int mmu_idx;
0124311e 2020
9fa3e853 2021#if defined(DEBUG_TLB)
108c49b8 2022 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 2023#endif
d4c430a8
PB
2024 /* Check if we need to flush due to large pages. */
2025 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
2026#if defined(DEBUG_TLB)
2027 printf("tlb_flush_page: forced full flush ("
2028 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
2029 env->tlb_flush_addr, env->tlb_flush_mask);
2030#endif
2031 tlb_flush(env, 1);
2032 return;
2033 }
0124311e
FB
2034 /* must reset current TB so that interrupts cannot modify the
2035 links while we are modifying them */
2036 env->current_tb = NULL;
61382a50
FB
2037
2038 addr &= TARGET_PAGE_MASK;
2039 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2040 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2041 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 2042
5c751e99 2043 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
2044}
2045
9fa3e853
FB
2046/* update the TLBs so that writes to code in the virtual page 'addr'
2047 can be detected */
c227f099 2048static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2049{
5fafdf24 2050 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2051 ram_addr + TARGET_PAGE_SIZE,
2052 CODE_DIRTY_FLAG);
9fa3e853
FB
2053}
2054
9fa3e853 2055/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2056 tested for self modifying code */
c227f099 2057static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2058 target_ulong vaddr)
9fa3e853 2059{
f7c11b53 2060 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2061}
2062
5fafdf24 2063static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2064 unsigned long start, unsigned long length)
2065{
2066 unsigned long addr;
84b7b8e7
FB
2067 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2068 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2069 if ((addr - start) < length) {
0f459d16 2070 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2071 }
2072 }
2073}
2074
5579c7f3 2075/* Note: start and end must be within the same ram block. */
c227f099 2076void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2077 int dirty_flags)
1ccde1cb
FB
2078{
2079 CPUState *env;
4f2ac237 2080 unsigned long length, start1;
f7c11b53 2081 int i;
1ccde1cb
FB
2082
2083 start &= TARGET_PAGE_MASK;
2084 end = TARGET_PAGE_ALIGN(end);
2085
2086 length = end - start;
2087 if (length == 0)
2088 return;
f7c11b53 2089 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2090
1ccde1cb
FB
2091 /* we modify the TLB cache so that the dirty bit will be set again
2092 when accessing the range */
b2e0a138 2093 start1 = (unsigned long)qemu_safe_ram_ptr(start);
a57d23e4 2094 /* Check that we don't span multiple blocks - this breaks the
5579c7f3 2095 address comparisons below. */
b2e0a138 2096 if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
5579c7f3
PB
2097 != (end - 1) - start) {
2098 abort();
2099 }
2100
6a00d601 2101 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2102 int mmu_idx;
2103 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2104 for(i = 0; i < CPU_TLB_SIZE; i++)
2105 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2106 start1, length);
2107 }
6a00d601 2108 }
1ccde1cb
FB
2109}
2110
74576198
AL
2111int cpu_physical_memory_set_dirty_tracking(int enable)
2112{
f6f3fbca 2113 int ret = 0;
74576198 2114 in_migration = enable;
f6f3fbca
MT
2115 ret = cpu_notify_migration_log(!!enable);
2116 return ret;
74576198
AL
2117}
2118
2119int cpu_physical_memory_get_dirty_tracking(void)
2120{
2121 return in_migration;
2122}
2123
c227f099
AL
2124int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2125 target_phys_addr_t end_addr)
2bec46dc 2126{
7b8f3b78 2127 int ret;
151f7749 2128
f6f3fbca 2129 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2130 return ret;
2bec46dc
AL
2131}
2132
e5896b12
AP
2133int cpu_physical_log_start(target_phys_addr_t start_addr,
2134 ram_addr_t size)
2135{
2136 CPUPhysMemoryClient *client;
2137 QLIST_FOREACH(client, &memory_client_list, list) {
2138 if (client->log_start) {
2139 int r = client->log_start(client, start_addr, size);
2140 if (r < 0) {
2141 return r;
2142 }
2143 }
2144 }
2145 return 0;
2146}
2147
2148int cpu_physical_log_stop(target_phys_addr_t start_addr,
2149 ram_addr_t size)
2150{
2151 CPUPhysMemoryClient *client;
2152 QLIST_FOREACH(client, &memory_client_list, list) {
2153 if (client->log_stop) {
2154 int r = client->log_stop(client, start_addr, size);
2155 if (r < 0) {
2156 return r;
2157 }
2158 }
2159 }
2160 return 0;
2161}
2162
3a7d929e
FB
2163static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2164{
c227f099 2165 ram_addr_t ram_addr;
5579c7f3 2166 void *p;
3a7d929e 2167
84b7b8e7 2168 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2169 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2170 + tlb_entry->addend);
e890261f 2171 ram_addr = qemu_ram_addr_from_host_nofail(p);
3a7d929e 2172 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2173 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2174 }
2175 }
2176}
2177
2178/* update the TLB according to the current state of the dirty bits */
2179void cpu_tlb_update_dirty(CPUState *env)
2180{
2181 int i;
cfde4bd9
IY
2182 int mmu_idx;
2183 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2184 for(i = 0; i < CPU_TLB_SIZE; i++)
2185 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2186 }
3a7d929e
FB
2187}
2188
0f459d16 2189static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2190{
0f459d16
PB
2191 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2192 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2193}
2194
0f459d16
PB
2195/* update the TLB corresponding to virtual page vaddr
2196 so that it is no longer dirty */
2197static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2198{
1ccde1cb 2199 int i;
cfde4bd9 2200 int mmu_idx;
1ccde1cb 2201
0f459d16 2202 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2203 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2204 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2205 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2206}
2207
d4c430a8
PB
2208/* Our TLB does not support large pages, so remember the area covered by
2209 large pages and trigger a full TLB flush if these are invalidated. */
2210static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2211 target_ulong size)
2212{
2213 target_ulong mask = ~(size - 1);
2214
2215 if (env->tlb_flush_addr == (target_ulong)-1) {
2216 env->tlb_flush_addr = vaddr & mask;
2217 env->tlb_flush_mask = mask;
2218 return;
2219 }
2220 /* Extend the existing region to include the new page.
2221 This is a compromise between unnecessary flushes and the cost
2222 of maintaining a full variable size TLB. */
2223 mask &= env->tlb_flush_mask;
2224 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2225 mask <<= 1;
2226 }
2227 env->tlb_flush_addr &= mask;
2228 env->tlb_flush_mask = mask;
2229}
2230
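/* Worked example of the widening loop above (illustrative, 32-bit
 * target_ulong assumed).  An existing 2 MB region has
 * tlb_flush_addr = 0x00200000 and tlb_flush_mask = 0xffe00000, and a second
 * 2 MB page is added at vaddr = 0x00c00000.  Starting from mask = 0xffe00000
 * the loop shifts left while the two addresses still differ under the mask:
 *     0xffe00000 -> 0xffc00000 -> 0xff800000 -> 0xff000000
 * at which point (0x00200000 ^ 0x00c00000) & 0xff000000 == 0.  The tracked
 * region becomes addr = 0x00000000, mask = 0xff000000: a single 16 MB range
 * covering both large pages. */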
2231/* Add a new TLB entry. At most one entry for a given virtual address
 2232 is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2233 supplied size is only used by tlb_flush_page. */
2234void tlb_set_page(CPUState *env, target_ulong vaddr,
2235 target_phys_addr_t paddr, int prot,
2236 int mmu_idx, target_ulong size)
9fa3e853 2237{
92e873b9 2238 PhysPageDesc *p;
4f2ac237 2239 unsigned long pd;
9fa3e853 2240 unsigned int index;
4f2ac237 2241 target_ulong address;
0f459d16 2242 target_ulong code_address;
355b1943 2243 unsigned long addend;
84b7b8e7 2244 CPUTLBEntry *te;
a1d1bb31 2245 CPUWatchpoint *wp;
c227f099 2246 target_phys_addr_t iotlb;
9fa3e853 2247
d4c430a8
PB
2248 assert(size >= TARGET_PAGE_SIZE);
2249 if (size != TARGET_PAGE_SIZE) {
2250 tlb_add_large_page(env, vaddr, size);
2251 }
92e873b9 2252 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2253 if (!p) {
2254 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2255 } else {
2256 pd = p->phys_offset;
9fa3e853
FB
2257 }
2258#if defined(DEBUG_TLB)
7fd3f494
SW
2259 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2260 " prot=%x idx=%d pd=0x%08lx\n",
2261 vaddr, paddr, prot, mmu_idx, pd);
9fa3e853
FB
2262#endif
2263
0f459d16
PB
2264 address = vaddr;
2265 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2266 /* IO memory case (romd handled later) */
2267 address |= TLB_MMIO;
2268 }
5579c7f3 2269 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2270 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2271 /* Normal RAM. */
2272 iotlb = pd & TARGET_PAGE_MASK;
2273 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2274 iotlb |= IO_MEM_NOTDIRTY;
2275 else
2276 iotlb |= IO_MEM_ROM;
2277 } else {
ccbb4d44 2278 /* IO handlers are currently passed a physical address.
0f459d16
PB
2279 It would be nice to pass an offset from the base address
2280 of that region. This would avoid having to special case RAM,
2281 and avoid full address decoding in every device.
2282 We can't use the high bits of pd for this because
2283 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2284 iotlb = (pd & ~TARGET_PAGE_MASK);
2285 if (p) {
8da3ff18
PB
2286 iotlb += p->region_offset;
2287 } else {
2288 iotlb += paddr;
2289 }
0f459d16
PB
2290 }
2291
2292 code_address = address;
2293 /* Make accesses to pages with watchpoints go via the
2294 watchpoint trap routines. */
72cf2d4f 2295 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2296 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
bf298f83
JK
2297 /* Avoid trapping reads of pages with a write breakpoint. */
2298 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2299 iotlb = io_mem_watch + paddr;
2300 address |= TLB_MMIO;
2301 break;
2302 }
6658ffb8 2303 }
0f459d16 2304 }
d79acba4 2305
0f459d16
PB
2306 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2307 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2308 te = &env->tlb_table[mmu_idx][index];
2309 te->addend = addend - vaddr;
2310 if (prot & PAGE_READ) {
2311 te->addr_read = address;
2312 } else {
2313 te->addr_read = -1;
2314 }
5c751e99 2315
0f459d16
PB
2316 if (prot & PAGE_EXEC) {
2317 te->addr_code = code_address;
2318 } else {
2319 te->addr_code = -1;
2320 }
2321 if (prot & PAGE_WRITE) {
2322 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2323 (pd & IO_MEM_ROMD)) {
2324 /* Write access calls the I/O callback. */
2325 te->addr_write = address | TLB_MMIO;
2326 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2327 !cpu_physical_memory_is_dirty(pd)) {
2328 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2329 } else {
0f459d16 2330 te->addr_write = address;
9fa3e853 2331 }
0f459d16
PB
2332 } else {
2333 te->addr_write = -1;
9fa3e853 2334 }
9fa3e853
FB
2335}
2336
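/* Summary of the resulting entry (descriptive note, based on the softmmu
 * fast path rather than on anything in this function): for plain dirty RAM
 * all three addr_* fields hold the page-aligned vaddr and the generated code
 * reaches host memory via
 *     host_ptr = (void *)(guest_vaddr + te->addend);
 * Any TLB_MMIO or TLB_NOTDIRTY bit set above makes the fast-path address
 * comparison fail, so those accesses are routed through the I/O handlers. */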
0124311e
FB
2337#else
2338
ee8b7021 2339void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2340{
2341}
2342
2e12669a 2343void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2344{
2345}
2346
edf8e2af
MW
2347/*
2348 * Walks guest process memory "regions" one by one
2349 * and calls callback function 'fn' for each region.
2350 */
5cd2c5b6
RH
2351
2352struct walk_memory_regions_data
2353{
2354 walk_memory_regions_fn fn;
2355 void *priv;
2356 unsigned long start;
2357 int prot;
2358};
2359
2360static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2361 abi_ulong end, int new_prot)
5cd2c5b6
RH
2362{
2363 if (data->start != -1ul) {
2364 int rc = data->fn(data->priv, data->start, end, data->prot);
2365 if (rc != 0) {
2366 return rc;
2367 }
2368 }
2369
2370 data->start = (new_prot ? end : -1ul);
2371 data->prot = new_prot;
2372
2373 return 0;
2374}
2375
2376static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2377 abi_ulong base, int level, void **lp)
5cd2c5b6 2378{
b480d9b7 2379 abi_ulong pa;
5cd2c5b6
RH
2380 int i, rc;
2381
2382 if (*lp == NULL) {
2383 return walk_memory_regions_end(data, base, 0);
2384 }
2385
2386 if (level == 0) {
2387 PageDesc *pd = *lp;
7296abac 2388 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2389 int prot = pd[i].flags;
2390
2391 pa = base | (i << TARGET_PAGE_BITS);
2392 if (prot != data->prot) {
2393 rc = walk_memory_regions_end(data, pa, prot);
2394 if (rc != 0) {
2395 return rc;
9fa3e853 2396 }
9fa3e853 2397 }
5cd2c5b6
RH
2398 }
2399 } else {
2400 void **pp = *lp;
7296abac 2401 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2402 pa = base | ((abi_ulong)i <<
2403 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2404 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2405 if (rc != 0) {
2406 return rc;
2407 }
2408 }
2409 }
2410
2411 return 0;
2412}
2413
2414int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2415{
2416 struct walk_memory_regions_data data;
2417 unsigned long i;
2418
2419 data.fn = fn;
2420 data.priv = priv;
2421 data.start = -1ul;
2422 data.prot = 0;
2423
2424 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2425 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2426 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2427 if (rc != 0) {
2428 return rc;
9fa3e853 2429 }
33417e70 2430 }
5cd2c5b6
RH
2431
2432 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2433}
2434
b480d9b7
PB
2435static int dump_region(void *priv, abi_ulong start,
2436 abi_ulong end, unsigned long prot)
edf8e2af
MW
2437{
2438 FILE *f = (FILE *)priv;
2439
b480d9b7
PB
2440 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2441 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2442 start, end, end - start,
2443 ((prot & PAGE_READ) ? 'r' : '-'),
2444 ((prot & PAGE_WRITE) ? 'w' : '-'),
2445 ((prot & PAGE_EXEC) ? 'x' : '-'));
2446
2447 return (0);
2448}
2449
2450/* dump memory mappings */
2451void page_dump(FILE *f)
2452{
2453 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2454 "start", "end", "size", "prot");
2455 walk_memory_regions(f, dump_region);
33417e70
FB
2456}
2457
53a5960a 2458int page_get_flags(target_ulong address)
33417e70 2459{
9fa3e853
FB
2460 PageDesc *p;
2461
2462 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2463 if (!p)
9fa3e853
FB
2464 return 0;
2465 return p->flags;
2466}
2467
376a7909
RH
2468/* Modify the flags of a page and invalidate the code if necessary.
2469 The flag PAGE_WRITE_ORG is positioned automatically depending
2470 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2471void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2472{
376a7909
RH
2473 target_ulong addr, len;
2474
2475 /* This function should never be called with addresses outside the
2476 guest address space. If this assert fires, it probably indicates
2477 a missing call to h2g_valid. */
b480d9b7
PB
2478#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2479 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2480#endif
2481 assert(start < end);
9fa3e853
FB
2482
2483 start = start & TARGET_PAGE_MASK;
2484 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2485
2486 if (flags & PAGE_WRITE) {
9fa3e853 2487 flags |= PAGE_WRITE_ORG;
376a7909
RH
2488 }
2489
2490 for (addr = start, len = end - start;
2491 len != 0;
2492 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2493 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2494
2495 /* If the write protection bit is set, then we invalidate
2496 the code inside. */
5fafdf24 2497 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2498 (flags & PAGE_WRITE) &&
2499 p->first_tb) {
d720b93d 2500 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2501 }
2502 p->flags = flags;
2503 }
33417e70
FB
2504}
2505
3d97b40b
TS
2506int page_check_range(target_ulong start, target_ulong len, int flags)
2507{
2508 PageDesc *p;
2509 target_ulong end;
2510 target_ulong addr;
2511
376a7909
RH
2512 /* This function should never be called with addresses outside the
2513 guest address space. If this assert fires, it probably indicates
2514 a missing call to h2g_valid. */
338e9e6c
BS
2515#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2516 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2517#endif
2518
3e0650a9
RH
2519 if (len == 0) {
2520 return 0;
2521 }
376a7909
RH
2522 if (start + len - 1 < start) {
2523 /* We've wrapped around. */
55f280c9 2524 return -1;
376a7909 2525 }
55f280c9 2526
3d97b40b
TS
 2527 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2528 start = start & TARGET_PAGE_MASK;
2529
376a7909
RH
2530 for (addr = start, len = end - start;
2531 len != 0;
2532 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2533 p = page_find(addr >> TARGET_PAGE_BITS);
2534 if( !p )
2535 return -1;
2536 if( !(p->flags & PAGE_VALID) )
2537 return -1;
2538
dae3270c 2539 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2540 return -1;
dae3270c
FB
2541 if (flags & PAGE_WRITE) {
2542 if (!(p->flags & PAGE_WRITE_ORG))
2543 return -1;
2544 /* unprotect the page if it was put read-only because it
2545 contains translated code */
2546 if (!(p->flags & PAGE_WRITE)) {
2547 if (!page_unprotect(addr, 0, NULL))
2548 return -1;
2549 }
2550 return 0;
2551 }
3d97b40b
TS
2552 }
2553 return 0;
2554}
2555
9fa3e853 2556/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2557 page. Return TRUE if the fault was successfully handled. */
53a5960a 2558int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2559{
45d679d6
AJ
2560 unsigned int prot;
2561 PageDesc *p;
53a5960a 2562 target_ulong host_start, host_end, addr;
9fa3e853 2563
c8a706fe
PB
2564 /* Technically this isn't safe inside a signal handler. However we
2565 know this only ever happens in a synchronous SEGV handler, so in
2566 practice it seems to be ok. */
2567 mmap_lock();
2568
45d679d6
AJ
2569 p = page_find(address >> TARGET_PAGE_BITS);
2570 if (!p) {
c8a706fe 2571 mmap_unlock();
9fa3e853 2572 return 0;
c8a706fe 2573 }
45d679d6 2574
9fa3e853
FB
2575 /* if the page was really writable, then we change its
2576 protection back to writable */
45d679d6
AJ
2577 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2578 host_start = address & qemu_host_page_mask;
2579 host_end = host_start + qemu_host_page_size;
2580
2581 prot = 0;
2582 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2583 p = page_find(addr >> TARGET_PAGE_BITS);
2584 p->flags |= PAGE_WRITE;
2585 prot |= p->flags;
2586
9fa3e853
FB
2587 /* and since the content will be modified, we must invalidate
2588 the corresponding translated code. */
45d679d6 2589 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2590#ifdef DEBUG_TB_CHECK
45d679d6 2591 tb_invalidate_check(addr);
9fa3e853 2592#endif
9fa3e853 2593 }
45d679d6
AJ
2594 mprotect((void *)g2h(host_start), qemu_host_page_size,
2595 prot & PAGE_BITS);
2596
2597 mmap_unlock();
2598 return 1;
9fa3e853 2599 }
c8a706fe 2600 mmap_unlock();
9fa3e853
FB
2601 return 0;
2602}
2603
6a00d601
FB
2604static inline void tlb_set_dirty(CPUState *env,
2605 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2606{
2607}
9fa3e853
FB
2608#endif /* defined(CONFIG_USER_ONLY) */
2609
e2eef170 2610#if !defined(CONFIG_USER_ONLY)
8da3ff18 2611
c04b2b78
PB
2612#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2613typedef struct subpage_t {
2614 target_phys_addr_t base;
f6405247
RH
2615 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2616 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2617} subpage_t;
2618
c227f099
AL
2619static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2620 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2621static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2622 ram_addr_t orig_memory,
2623 ram_addr_t region_offset);
db7b5426
BS
2624#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2625 need_subpage) \
2626 do { \
2627 if (addr > start_addr) \
2628 start_addr2 = 0; \
2629 else { \
2630 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2631 if (start_addr2 > 0) \
2632 need_subpage = 1; \
2633 } \
2634 \
49e9fba2 2635 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2636 end_addr2 = TARGET_PAGE_SIZE - 1; \
2637 else { \
2638 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2639 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2640 need_subpage = 1; \
2641 } \
2642 } while (0)
2643
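/* Worked example (illustrative, 4 KB target pages assumed): registering a
 * 0x800-byte region at start_addr = 0x10000800.  For the page at
 * addr = 0x10000000 we have addr <= start_addr, so start_addr2 = 0x800 and
 * need_subpage is set; start_addr + orig_size reaches the end of the page,
 * so end_addr2 = 0xfff.  The page therefore gets a subpage mapping covering
 * bytes [0x800, 0xfff] while the first half stays unassigned. */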
8f2498f9
MT
2644/* register physical memory.
2645 For RAM, 'size' must be a multiple of the target page size.
2646 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2647 io memory page. The address used when calling the IO function is
2648 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2649 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2650 before calculating this offset. This should not be a problem unless
2651 the low bits of start_addr and region_offset differ. */
0fd542fb 2652void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
c227f099
AL
2653 ram_addr_t size,
2654 ram_addr_t phys_offset,
0fd542fb
MT
2655 ram_addr_t region_offset,
2656 bool log_dirty)
33417e70 2657{
c227f099 2658 target_phys_addr_t addr, end_addr;
92e873b9 2659 PhysPageDesc *p;
9d42037b 2660 CPUState *env;
c227f099 2661 ram_addr_t orig_size = size;
f6405247 2662 subpage_t *subpage;
33417e70 2663
3b8e6a2d 2664 assert(size);
0fd542fb 2665 cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);
f6f3fbca 2666
67c4d23c
PB
2667 if (phys_offset == IO_MEM_UNASSIGNED) {
2668 region_offset = start_addr;
2669 }
8da3ff18 2670 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2671 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2672 end_addr = start_addr + (target_phys_addr_t)size;
3b8e6a2d
EI
2673
2674 addr = start_addr;
2675 do {
db7b5426
BS
2676 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2677 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2678 ram_addr_t orig_memory = p->phys_offset;
2679 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2680 int need_subpage = 0;
2681
2682 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2683 need_subpage);
f6405247 2684 if (need_subpage) {
db7b5426
BS
2685 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2686 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2687 &p->phys_offset, orig_memory,
2688 p->region_offset);
db7b5426
BS
2689 } else {
2690 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2691 >> IO_MEM_SHIFT];
2692 }
8da3ff18
PB
2693 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2694 region_offset);
2695 p->region_offset = 0;
db7b5426
BS
2696 } else {
2697 p->phys_offset = phys_offset;
2698 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2699 (phys_offset & IO_MEM_ROMD))
2700 phys_offset += TARGET_PAGE_SIZE;
2701 }
2702 } else {
2703 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2704 p->phys_offset = phys_offset;
8da3ff18 2705 p->region_offset = region_offset;
db7b5426 2706 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2707 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2708 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2709 } else {
c227f099 2710 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2711 int need_subpage = 0;
2712
2713 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2714 end_addr2, need_subpage);
2715
f6405247 2716 if (need_subpage) {
db7b5426 2717 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2718 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2719 addr & TARGET_PAGE_MASK);
db7b5426 2720 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2721 phys_offset, region_offset);
2722 p->region_offset = 0;
db7b5426
BS
2723 }
2724 }
2725 }
8da3ff18 2726 region_offset += TARGET_PAGE_SIZE;
3b8e6a2d
EI
2727 addr += TARGET_PAGE_SIZE;
2728 } while (addr != end_addr);
3b46e624 2729
9d42037b
FB
2730 /* since each CPU stores ram addresses in its TLB cache, we must
2731 reset the modified entries */
2732 /* XXX: slow ! */
2733 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2734 tlb_flush(env, 1);
2735 }
33417e70
FB
2736}
2737
ba863458 2738/* XXX: temporary until new memory mapping API */
c227f099 2739ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2740{
2741 PhysPageDesc *p;
2742
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p)
2745 return IO_MEM_UNASSIGNED;
2746 return p->phys_offset;
2747}
2748
c227f099 2749void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2750{
2751 if (kvm_enabled())
2752 kvm_coalesce_mmio_region(addr, size);
2753}
2754
c227f099 2755void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2756{
2757 if (kvm_enabled())
2758 kvm_uncoalesce_mmio_region(addr, size);
2759}
2760
62a2744c
SY
2761void qemu_flush_coalesced_mmio_buffer(void)
2762{
2763 if (kvm_enabled())
2764 kvm_flush_coalesced_mmio_buffer();
2765}
2766
c902760f
MT
2767#if defined(__linux__) && !defined(TARGET_S390X)
2768
2769#include <sys/vfs.h>
2770
2771#define HUGETLBFS_MAGIC 0x958458f6
2772
2773static long gethugepagesize(const char *path)
2774{
2775 struct statfs fs;
2776 int ret;
2777
2778 do {
9742bf26 2779 ret = statfs(path, &fs);
c902760f
MT
2780 } while (ret != 0 && errno == EINTR);
2781
2782 if (ret != 0) {
9742bf26
YT
2783 perror(path);
2784 return 0;
c902760f
MT
2785 }
2786
2787 if (fs.f_type != HUGETLBFS_MAGIC)
9742bf26 2788 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
c902760f
MT
2789
2790 return fs.f_bsize;
2791}
2792
04b16653
AW
2793static void *file_ram_alloc(RAMBlock *block,
2794 ram_addr_t memory,
2795 const char *path)
c902760f
MT
2796{
2797 char *filename;
2798 void *area;
2799 int fd;
2800#ifdef MAP_POPULATE
2801 int flags;
2802#endif
2803 unsigned long hpagesize;
2804
2805 hpagesize = gethugepagesize(path);
2806 if (!hpagesize) {
9742bf26 2807 return NULL;
c902760f
MT
2808 }
2809
2810 if (memory < hpagesize) {
2811 return NULL;
2812 }
2813
2814 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2815 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2816 return NULL;
2817 }
2818
2819 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
9742bf26 2820 return NULL;
c902760f
MT
2821 }
2822
2823 fd = mkstemp(filename);
2824 if (fd < 0) {
9742bf26
YT
2825 perror("unable to create backing store for hugepages");
2826 free(filename);
2827 return NULL;
c902760f
MT
2828 }
2829 unlink(filename);
2830 free(filename);
2831
2832 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2833
2834 /*
2835 * ftruncate is not supported by hugetlbfs in older
2836 * hosts, so don't bother bailing out on errors.
2837 * If anything goes wrong with it under other filesystems,
2838 * mmap will fail.
2839 */
2840 if (ftruncate(fd, memory))
9742bf26 2841 perror("ftruncate");
c902760f
MT
2842
2843#ifdef MAP_POPULATE
2844 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2845 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2846 * to sidestep this quirk.
2847 */
2848 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2849 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2850#else
2851 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2852#endif
2853 if (area == MAP_FAILED) {
9742bf26
YT
2854 perror("file_ram_alloc: can't mmap RAM pages");
2855 close(fd);
2856 return (NULL);
c902760f 2857 }
04b16653 2858 block->fd = fd;
c902760f
MT
2859 return area;
2860}
2861#endif
2862
d17b5288 2863static ram_addr_t find_ram_offset(ram_addr_t size)
04b16653
AW
2864{
2865 RAMBlock *block, *next_block;
09d7ae90 2866 ram_addr_t offset = 0, mingap = ULONG_MAX;
04b16653
AW
2867
2868 if (QLIST_EMPTY(&ram_list.blocks))
2869 return 0;
2870
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
2872 ram_addr_t end, next = ULONG_MAX;
2873
2874 end = block->offset + block->length;
2875
2876 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2877 if (next_block->offset >= end) {
2878 next = MIN(next, next_block->offset);
2879 }
2880 }
2881 if (next - end >= size && next - end < mingap) {
2882 offset = end;
2883 mingap = next - end;
2884 }
2885 }
2886 return offset;
2887}
2888
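/* Worked example (illustrative sizes): with existing blocks at
 * [0x00000000, 0x08000000) and [0x10000000, 0x18000000), a request for
 * size = 0x04000000 sees two candidate gaps: 0x08000000..0x10000000
 * (128 MB) and everything above 0x18000000.  The 128 MB hole is the
 * smallest gap that still fits, so the new block is placed at offset
 * 0x08000000. */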
2889static ram_addr_t last_ram_offset(void)
d17b5288
AW
2890{
2891 RAMBlock *block;
2892 ram_addr_t last = 0;
2893
2894 QLIST_FOREACH(block, &ram_list.blocks, next)
2895 last = MAX(last, block->offset + block->length);
2896
2897 return last;
2898}
2899
84b89d78 2900ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
6977dfe6 2901 ram_addr_t size, void *host)
84b89d78
CM
2902{
2903 RAMBlock *new_block, *block;
2904
2905 size = TARGET_PAGE_ALIGN(size);
2906 new_block = qemu_mallocz(sizeof(*new_block));
2907
2908 if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2909 char *id = dev->parent_bus->info->get_dev_path(dev);
2910 if (id) {
2911 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2912 qemu_free(id);
2913 }
2914 }
2915 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2916
2917 QLIST_FOREACH(block, &ram_list.blocks, next) {
2918 if (!strcmp(block->idstr, new_block->idstr)) {
2919 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2920 new_block->idstr);
2921 abort();
2922 }
2923 }
2924
432d268c 2925 new_block->offset = find_ram_offset(size);
6977dfe6
YT
2926 if (host) {
2927 new_block->host = host;
cd19cfa2 2928 new_block->flags |= RAM_PREALLOC_MASK;
6977dfe6
YT
2929 } else {
2930 if (mem_path) {
c902760f 2931#if defined (__linux__) && !defined(TARGET_S390X)
6977dfe6
YT
2932 new_block->host = file_ram_alloc(new_block, size, mem_path);
2933 if (!new_block->host) {
2934 new_block->host = qemu_vmalloc(size);
e78815a5 2935 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2936 }
c902760f 2937#else
6977dfe6
YT
2938 fprintf(stderr, "-mem-path option unsupported\n");
2939 exit(1);
c902760f 2940#endif
6977dfe6 2941 } else {
6b02494d 2942#if defined(TARGET_S390X) && defined(CONFIG_KVM)
ff83678a
CB
2943 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2944 an system defined value, which is at least 256GB. Larger systems
2945 have larger values. We put the guest between the end of data
2946 segment (system break) and this value. We use 32GB as a base to
2947 have enough room for the system break to grow. */
2948 new_block->host = mmap((void*)0x800000000, size,
6977dfe6 2949 PROT_EXEC|PROT_READ|PROT_WRITE,
ff83678a 2950 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
fb8b2735
AG
2951 if (new_block->host == MAP_FAILED) {
2952 fprintf(stderr, "Allocating RAM failed\n");
2953 abort();
2954 }
6b02494d 2955#else
432d268c
JN
2956 if (xen_mapcache_enabled()) {
2957 xen_ram_alloc(new_block->offset, size);
2958 } else {
2959 new_block->host = qemu_vmalloc(size);
2960 }
6b02494d 2961#endif
e78815a5 2962 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
6977dfe6 2963 }
c902760f 2964 }
94a6b54f
PB
2965 new_block->length = size;
2966
f471a17e 2967 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
94a6b54f 2968
f471a17e 2969 ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
04b16653 2970 last_ram_offset() >> TARGET_PAGE_BITS);
d17b5288 2971 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
94a6b54f
PB
2972 0xff, size >> TARGET_PAGE_BITS);
2973
6f0437e8
JK
2974 if (kvm_enabled())
2975 kvm_setup_guest_memory(new_block->host, size);
2976
94a6b54f
PB
2977 return new_block->offset;
2978}
e9a1ab19 2979
6977dfe6
YT
2980ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2981{
2982 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2983}
2984
1f2e98b6
AW
2985void qemu_ram_free_from_ptr(ram_addr_t addr)
2986{
2987 RAMBlock *block;
2988
2989 QLIST_FOREACH(block, &ram_list.blocks, next) {
2990 if (addr == block->offset) {
2991 QLIST_REMOVE(block, next);
2992 qemu_free(block);
2993 return;
2994 }
2995 }
2996}
2997
c227f099 2998void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2999{
04b16653
AW
3000 RAMBlock *block;
3001
3002 QLIST_FOREACH(block, &ram_list.blocks, next) {
3003 if (addr == block->offset) {
3004 QLIST_REMOVE(block, next);
cd19cfa2
HY
3005 if (block->flags & RAM_PREALLOC_MASK) {
3006 ;
3007 } else if (mem_path) {
04b16653
AW
3008#if defined (__linux__) && !defined(TARGET_S390X)
3009 if (block->fd) {
3010 munmap(block->host, block->length);
3011 close(block->fd);
3012 } else {
3013 qemu_vfree(block->host);
3014 }
fd28aa13
JK
3015#else
3016 abort();
04b16653
AW
3017#endif
3018 } else {
3019#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3020 munmap(block->host, block->length);
3021#else
432d268c
JN
3022 if (xen_mapcache_enabled()) {
3023 qemu_invalidate_entry(block->host);
3024 } else {
3025 qemu_vfree(block->host);
3026 }
04b16653
AW
3027#endif
3028 }
3029 qemu_free(block);
3030 return;
3031 }
3032 }
3033
e9a1ab19
FB
3034}
3035
cd19cfa2
HY
3036#ifndef _WIN32
3037void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
3038{
3039 RAMBlock *block;
3040 ram_addr_t offset;
3041 int flags;
3042 void *area, *vaddr;
3043
3044 QLIST_FOREACH(block, &ram_list.blocks, next) {
3045 offset = addr - block->offset;
3046 if (offset < block->length) {
3047 vaddr = block->host + offset;
3048 if (block->flags & RAM_PREALLOC_MASK) {
3049 ;
3050 } else {
3051 flags = MAP_FIXED;
3052 munmap(vaddr, length);
3053 if (mem_path) {
3054#if defined(__linux__) && !defined(TARGET_S390X)
3055 if (block->fd) {
3056#ifdef MAP_POPULATE
3057 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
3058 MAP_PRIVATE;
3059#else
3060 flags |= MAP_PRIVATE;
3061#endif
3062 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3063 flags, block->fd, offset);
3064 } else {
3065 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3066 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3067 flags, -1, 0);
3068 }
fd28aa13
JK
3069#else
3070 abort();
cd19cfa2
HY
3071#endif
3072 } else {
3073#if defined(TARGET_S390X) && defined(CONFIG_KVM)
3074 flags |= MAP_SHARED | MAP_ANONYMOUS;
3075 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
3076 flags, -1, 0);
3077#else
3078 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
3079 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
3080 flags, -1, 0);
3081#endif
3082 }
3083 if (area != vaddr) {
3084 fprintf(stderr, "Could not remap addr: %lx@%lx\n",
3085 length, addr);
3086 exit(1);
3087 }
3088 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
3089 }
3090 return;
3091 }
3092 }
3093}
3094#endif /* !_WIN32 */
3095
dc828ca1 3096/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
3097 With the exception of the softmmu code in this file, this should
3098 only be used for local memory (e.g. video ram) that the device owns,
3099 and knows it isn't going to access beyond the end of the block.
3100
3101 It should not be used for general purpose DMA.
3102 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3103 */
c227f099 3104void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 3105{
94a6b54f
PB
3106 RAMBlock *block;
3107
f471a17e
AW
3108 QLIST_FOREACH(block, &ram_list.blocks, next) {
3109 if (addr - block->offset < block->length) {
7d82af38
VP
 3110 /* Move this entry to the start of the list. */
3111 if (block != QLIST_FIRST(&ram_list.blocks)) {
3112 QLIST_REMOVE(block, next);
3113 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3114 }
432d268c
JN
3115 if (xen_mapcache_enabled()) {
3116 /* We need to check if the requested address is in the RAM
3117 * because we don't want to map the entire memory in QEMU.
712c2b41 3118 * In that case just map until the end of the page.
432d268c
JN
3119 */
3120 if (block->offset == 0) {
712c2b41 3121 return qemu_map_cache(addr, 0, 0);
432d268c 3122 } else if (block->host == NULL) {
6506e4f9 3123 block->host = qemu_map_cache(block->offset, block->length, 1);
432d268c
JN
3124 }
3125 }
f471a17e
AW
3126 return block->host + (addr - block->offset);
3127 }
94a6b54f 3128 }
f471a17e
AW
3129
3130 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3131 abort();
3132
3133 return NULL;
dc828ca1
PB
3134}
3135
b2e0a138
MT
3136/* Return a host pointer to ram allocated with qemu_ram_alloc.
 3137 * Same as qemu_get_ram_ptr but avoids reordering ramblocks.
3138 */
3139void *qemu_safe_ram_ptr(ram_addr_t addr)
3140{
3141 RAMBlock *block;
3142
3143 QLIST_FOREACH(block, &ram_list.blocks, next) {
3144 if (addr - block->offset < block->length) {
432d268c
JN
3145 if (xen_mapcache_enabled()) {
3146 /* We need to check if the requested address is in the RAM
3147 * because we don't want to map the entire memory in QEMU.
712c2b41 3148 * In that case just map until the end of the page.
432d268c
JN
3149 */
3150 if (block->offset == 0) {
712c2b41 3151 return qemu_map_cache(addr, 0, 0);
432d268c 3152 } else if (block->host == NULL) {
6506e4f9 3153 block->host = qemu_map_cache(block->offset, block->length, 1);
432d268c
JN
3154 }
3155 }
b2e0a138
MT
3156 return block->host + (addr - block->offset);
3157 }
3158 }
3159
3160 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3161 abort();
3162
3163 return NULL;
3164}
3165
38bee5dc
SS
3166/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3167 * but takes a size argument */
3168void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size)
3169{
3170 if (xen_mapcache_enabled())
3171 return qemu_map_cache(addr, *size, 1);
3172 else {
3173 RAMBlock *block;
3174
3175 QLIST_FOREACH(block, &ram_list.blocks, next) {
3176 if (addr - block->offset < block->length) {
3177 if (addr - block->offset + *size > block->length)
3178 *size = block->length - addr + block->offset;
3179 return block->host + (addr - block->offset);
3180 }
3181 }
3182
3183 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3184 abort();
3185
3186 *size = 0;
3187 return NULL;
3188 }
3189}
3190
050a0ddf
AP
3191void qemu_put_ram_ptr(void *addr)
3192{
3193 trace_qemu_put_ram_ptr(addr);
050a0ddf
AP
3194}
3195
e890261f 3196int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
5579c7f3 3197{
94a6b54f
PB
3198 RAMBlock *block;
3199 uint8_t *host = ptr;
3200
712c2b41
SS
3201 if (xen_mapcache_enabled()) {
3202 *ram_addr = qemu_ram_addr_from_mapcache(ptr);
3203 return 0;
3204 }
3205
f471a17e 3206 QLIST_FOREACH(block, &ram_list.blocks, next) {
432d268c
JN
 3207 /* This case happens when the block is not mapped. */
3208 if (block->host == NULL) {
3209 continue;
3210 }
f471a17e 3211 if (host - block->host < block->length) {
e890261f
MT
3212 *ram_addr = block->offset + (host - block->host);
3213 return 0;
f471a17e 3214 }
94a6b54f 3215 }
432d268c 3216
e890261f
MT
3217 return -1;
3218}
f471a17e 3219
e890261f
MT
3220/* Some of the softmmu routines need to translate from a host pointer
3221 (typically a TLB entry) back to a ram offset. */
3222ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3223{
3224 ram_addr_t ram_addr;
f471a17e 3225
e890261f
MT
3226 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3227 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3228 abort();
3229 }
3230 return ram_addr;
5579c7f3
PB
3231}
3232
c227f099 3233static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 3234{
67d3b957 3235#ifdef DEBUG_UNASSIGNED
ab3d1727 3236 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 3237#endif
5b450407 3238#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3239 do_unassigned_access(addr, 0, 0, 0, 1);
3240#endif
3241 return 0;
3242}
3243
c227f099 3244static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3245{
3246#ifdef DEBUG_UNASSIGNED
3247 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3248#endif
5b450407 3249#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3250 do_unassigned_access(addr, 0, 0, 0, 2);
3251#endif
3252 return 0;
3253}
3254
c227f099 3255static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
3256{
3257#ifdef DEBUG_UNASSIGNED
3258 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3259#endif
5b450407 3260#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3261 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 3262#endif
33417e70
FB
3263 return 0;
3264}
3265
c227f099 3266static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 3267{
67d3b957 3268#ifdef DEBUG_UNASSIGNED
ab3d1727 3269 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 3270#endif
5b450407 3271#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3272 do_unassigned_access(addr, 1, 0, 0, 1);
3273#endif
3274}
3275
c227f099 3276static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3277{
3278#ifdef DEBUG_UNASSIGNED
3279 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3280#endif
5b450407 3281#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
3282 do_unassigned_access(addr, 1, 0, 0, 2);
3283#endif
3284}
3285
c227f099 3286static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
3287{
3288#ifdef DEBUG_UNASSIGNED
3289 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3290#endif
5b450407 3291#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 3292 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 3293#endif
33417e70
FB
3294}
3295
d60efc6b 3296static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 3297 unassigned_mem_readb,
e18231a3
BS
3298 unassigned_mem_readw,
3299 unassigned_mem_readl,
33417e70
FB
3300};
3301
d60efc6b 3302static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 3303 unassigned_mem_writeb,
e18231a3
BS
3304 unassigned_mem_writew,
3305 unassigned_mem_writel,
33417e70
FB
3306};
3307
c227f099 3308static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3309 uint32_t val)
9fa3e853 3310{
3a7d929e 3311 int dirty_flags;
f7c11b53 3312 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3313 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3314#if !defined(CONFIG_USER_ONLY)
3a7d929e 3315 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 3316 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3317#endif
3a7d929e 3318 }
5579c7f3 3319 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3320 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3321 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3322 /* we remove the notdirty callback only if the code has been
3323 flushed */
3324 if (dirty_flags == 0xff)
2e70f6ef 3325 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3326}
3327
c227f099 3328static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3329 uint32_t val)
9fa3e853 3330{
3a7d929e 3331 int dirty_flags;
f7c11b53 3332 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3333 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3334#if !defined(CONFIG_USER_ONLY)
3a7d929e 3335 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 3336 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3337#endif
3a7d929e 3338 }
5579c7f3 3339 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3340 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3341 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3342 /* we remove the notdirty callback only if the code has been
3343 flushed */
3344 if (dirty_flags == 0xff)
2e70f6ef 3345 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3346}
3347
c227f099 3348static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3349 uint32_t val)
9fa3e853 3350{
3a7d929e 3351 int dirty_flags;
f7c11b53 3352 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3353 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3354#if !defined(CONFIG_USER_ONLY)
3a7d929e 3355 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3356 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3357#endif
3a7d929e 3358 }
5579c7f3 3359 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3360 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3361 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3362 /* we remove the notdirty callback only if the code has been
3363 flushed */
3364 if (dirty_flags == 0xff)
2e70f6ef 3365 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3366}
3367
d60efc6b 3368static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3369 NULL, /* never used */
3370 NULL, /* never used */
3371 NULL, /* never used */
3372};
3373
d60efc6b 3374static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3375 notdirty_mem_writeb,
3376 notdirty_mem_writew,
3377 notdirty_mem_writel,
3378};
3379
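/* Summary of the mechanism above (descriptive note): RAM pages whose dirty
 * flags are not all set - typically because they contain translated code -
 * are entered in the TLB with TLB_NOTDIRTY, so guest stores go through the
 * notdirty_mem_write* handlers.  Each handler invalidates any TBs on the
 * page, performs the store, updates the dirty flags, and only once all
 * flags are set (0xff) does tlb_set_dirty() restore the direct fast-path
 * mapping for that virtual page. */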
0f459d16 3380/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3381static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3382{
3383 CPUState *env = cpu_single_env;
06d55cc1
AL
3384 target_ulong pc, cs_base;
3385 TranslationBlock *tb;
0f459d16 3386 target_ulong vaddr;
a1d1bb31 3387 CPUWatchpoint *wp;
06d55cc1 3388 int cpu_flags;
0f459d16 3389
06d55cc1
AL
3390 if (env->watchpoint_hit) {
3391 /* We re-entered the check after replacing the TB. Now raise
3392 * the debug interrupt so that is will trigger after the
3393 * current instruction. */
3394 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3395 return;
3396 }
2e70f6ef 3397 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3398 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3399 if ((vaddr == (wp->vaddr & len_mask) ||
3400 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3401 wp->flags |= BP_WATCHPOINT_HIT;
3402 if (!env->watchpoint_hit) {
3403 env->watchpoint_hit = wp;
3404 tb = tb_find_pc(env->mem_io_pc);
3405 if (!tb) {
3406 cpu_abort(env, "check_watchpoint: could not find TB for "
3407 "pc=%p", (void *)env->mem_io_pc);
3408 }
618ba8e6 3409 cpu_restore_state(tb, env, env->mem_io_pc);
6e140f28
AL
3410 tb_phys_invalidate(tb, -1);
3411 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3412 env->exception_index = EXCP_DEBUG;
3413 } else {
3414 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3415 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3416 }
3417 cpu_resume_from_signal(env, NULL);
06d55cc1 3418 }
6e140f28
AL
3419 } else {
3420 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3421 }
3422 }
3423}
3424
6658ffb8
PB
3425/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3426 so these check for a hit then pass through to the normal out-of-line
3427 phys routines. */
c227f099 3428static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3429{
b4051334 3430 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3431 return ldub_phys(addr);
3432}
3433
c227f099 3434static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3435{
b4051334 3436 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3437 return lduw_phys(addr);
3438}
3439
c227f099 3440static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3441{
b4051334 3442 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3443 return ldl_phys(addr);
3444}
3445
c227f099 3446static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3447 uint32_t val)
3448{
b4051334 3449 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3450 stb_phys(addr, val);
3451}
3452
c227f099 3453static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3454 uint32_t val)
3455{
b4051334 3456 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3457 stw_phys(addr, val);
3458}
3459
c227f099 3460static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3461 uint32_t val)
3462{
b4051334 3463 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3464 stl_phys(addr, val);
3465}
3466
d60efc6b 3467static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3468 watch_mem_readb,
3469 watch_mem_readw,
3470 watch_mem_readl,
3471};
3472
d60efc6b 3473static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3474 watch_mem_writeb,
3475 watch_mem_writew,
3476 watch_mem_writel,
3477};
6658ffb8 3478
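/* Subpage handling (summary): a target page that is split between several
   regions smaller than TARGET_PAGE_SIZE is backed by a subpage_t; each access
   is forwarded, via the per-index tables filled in by subpage_register(),
   to the I/O handlers registered for the corresponding sub-region. */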
f6405247
RH
3479static inline uint32_t subpage_readlen (subpage_t *mmio,
3480 target_phys_addr_t addr,
3481 unsigned int len)
db7b5426 3482{
f6405247 3483 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3484#if defined(DEBUG_SUBPAGE)
3485 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3486 mmio, len, addr, idx);
3487#endif
db7b5426 3488
f6405247
RH
3489 addr += mmio->region_offset[idx];
3490 idx = mmio->sub_io_index[idx];
3491 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3492}
3493
c227f099 3494static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3495 uint32_t value, unsigned int len)
db7b5426 3496{
f6405247 3497 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3498#if defined(DEBUG_SUBPAGE)
f6405247
RH
3499 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3500 __func__, mmio, len, addr, idx, value);
db7b5426 3501#endif
f6405247
RH
3502
3503 addr += mmio->region_offset[idx];
3504 idx = mmio->sub_io_index[idx];
3505 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3506}
3507
c227f099 3508static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3509{
db7b5426
BS
3510 return subpage_readlen(opaque, addr, 0);
3511}
3512
c227f099 3513static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3514 uint32_t value)
3515{
db7b5426
BS
3516 subpage_writelen(opaque, addr, value, 0);
3517}
3518
c227f099 3519static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3520{
db7b5426
BS
3521 return subpage_readlen(opaque, addr, 1);
3522}
3523
c227f099 3524static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3525 uint32_t value)
3526{
db7b5426
BS
3527 subpage_writelen(opaque, addr, value, 1);
3528}
3529
c227f099 3530static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3531{
db7b5426
BS
3532 return subpage_readlen(opaque, addr, 2);
3533}
3534
f6405247
RH
3535static void subpage_writel (void *opaque, target_phys_addr_t addr,
3536 uint32_t value)
db7b5426 3537{
db7b5426
BS
3538 subpage_writelen(opaque, addr, value, 2);
3539}
3540
d60efc6b 3541static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3542 &subpage_readb,
3543 &subpage_readw,
3544 &subpage_readl,
3545};
3546
d60efc6b 3547static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3548 &subpage_writeb,
3549 &subpage_writew,
3550 &subpage_writel,
3551};
3552
c227f099
AL
3553static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3554 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3555{
3556 int idx, eidx;
3557
3558 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3559 return -1;
3560 idx = SUBPAGE_IDX(start);
3561 eidx = SUBPAGE_IDX(end);
3562#if defined(DEBUG_SUBPAGE)
0bf9e31a 3563 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3564 mmio, start, end, idx, eidx, memory);
3565#endif
95c318f5
GN
3566 if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3567 memory = IO_MEM_UNASSIGNED;
f6405247 3568 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3569 for (; idx <= eidx; idx++) {
f6405247
RH
3570 mmio->sub_io_index[idx] = memory;
3571 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3572 }
3573
3574 return 0;
3575}
3576
f6405247
RH
3577static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3578 ram_addr_t orig_memory,
3579 ram_addr_t region_offset)
db7b5426 3580{
c227f099 3581 subpage_t *mmio;
db7b5426
BS
3582 int subpage_memory;
3583
c227f099 3584 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3585
3586 mmio->base = base;
2507c12a
AG
3587 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3588 DEVICE_NATIVE_ENDIAN);
db7b5426 3589#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3590 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3591 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3592#endif
1eec614b 3593 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3594 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3595
3596 return mmio;
3597}
3598
88715657
AL
3599static int get_free_io_mem_idx(void)
3600{
3601 int i;
3602
3603 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3604 if (!io_mem_used[i]) {
3605 io_mem_used[i] = 1;
3606 return i;
3607 }
c6703b47 3608 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3609 return -1;
3610}
3611
dd310534
AG
3612/*
3613 * Usually, devices operate in little endian mode. There are devices out
 3614 * there that operate in big endian too. Each device gets byte-swapped
 3615 * MMIO if it is plugged into a CPU of the other endianness.
3616 *
3617 * CPU Device swap?
3618 *
3619 * little little no
3620 * little big yes
3621 * big little yes
3622 * big big no
3623 */
3624
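
The table above reduces to "swap exactly when the device's endianness differs from the target's". A minimal sketch of that rule, with hypothetical helper and parameter names (not part of this file):

#include <stdbool.h>

static inline bool mmio_needs_bswap(bool device_is_big_endian,
                                    bool target_is_big_endian)
{
    /* little/little and big/big -> no swap; mixed endianness -> swap */
    return device_is_big_endian != target_is_big_endian;
}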
3625typedef struct SwapEndianContainer {
3626 CPUReadMemoryFunc *read[3];
3627 CPUWriteMemoryFunc *write[3];
3628 void *opaque;
3629} SwapEndianContainer;
3630
3631static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3632{
3633 uint32_t val;
3634 SwapEndianContainer *c = opaque;
3635 val = c->read[0](c->opaque, addr);
3636 return val;
3637}
3638
3639static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3640{
3641 uint32_t val;
3642 SwapEndianContainer *c = opaque;
3643 val = bswap16(c->read[1](c->opaque, addr));
3644 return val;
3645}
3646
3647static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3648{
3649 uint32_t val;
3650 SwapEndianContainer *c = opaque;
3651 val = bswap32(c->read[2](c->opaque, addr));
3652 return val;
3653}
3654
3655static CPUReadMemoryFunc * const swapendian_readfn[3]={
3656 swapendian_mem_readb,
3657 swapendian_mem_readw,
3658 swapendian_mem_readl
3659};
3660
3661static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3662 uint32_t val)
3663{
3664 SwapEndianContainer *c = opaque;
3665 c->write[0](c->opaque, addr, val);
3666}
3667
3668static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3669 uint32_t val)
3670{
3671 SwapEndianContainer *c = opaque;
3672 c->write[1](c->opaque, addr, bswap16(val));
3673}
3674
3675static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3676 uint32_t val)
3677{
3678 SwapEndianContainer *c = opaque;
3679 c->write[2](c->opaque, addr, bswap32(val));
3680}
3681
3682static CPUWriteMemoryFunc * const swapendian_writefn[3]={
3683 swapendian_mem_writeb,
3684 swapendian_mem_writew,
3685 swapendian_mem_writel
3686};
3687
3688static void swapendian_init(int io_index)
3689{
3690 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3691 int i;
3692
3693 /* Swap mmio for big endian targets */
3694 c->opaque = io_mem_opaque[io_index];
3695 for (i = 0; i < 3; i++) {
3696 c->read[i] = io_mem_read[io_index][i];
3697 c->write[i] = io_mem_write[io_index][i];
3698
3699 io_mem_read[io_index][i] = swapendian_readfn[i];
3700 io_mem_write[io_index][i] = swapendian_writefn[i];
3701 }
3702 io_mem_opaque[io_index] = c;
3703}
3704
3705static void swapendian_del(int io_index)
3706{
3707 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3708 qemu_free(io_mem_opaque[io_index]);
3709 }
3710}
3711
33417e70
FB
3712/* mem_read and mem_write are arrays of functions containing the
3713 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3714 2). Functions can be omitted with a NULL function pointer.
3ee89922 3715 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3716 modified. If it is zero, a new io zone is allocated. The return
3717 value can be used with cpu_register_physical_memory(). (-1) is
 3718 returned on error. */
1eed09cb 3719static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3720 CPUReadMemoryFunc * const *mem_read,
3721 CPUWriteMemoryFunc * const *mem_write,
dd310534 3722 void *opaque, enum device_endian endian)
33417e70 3723{
3cab721d
RH
3724 int i;
3725
33417e70 3726 if (io_index <= 0) {
88715657
AL
3727 io_index = get_free_io_mem_idx();
3728 if (io_index == -1)
3729 return io_index;
33417e70 3730 } else {
1eed09cb 3731 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3732 if (io_index >= IO_MEM_NB_ENTRIES)
3733 return -1;
3734 }
b5ff1b31 3735
3cab721d
RH
3736 for (i = 0; i < 3; ++i) {
3737 io_mem_read[io_index][i]
3738 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3739 }
3740 for (i = 0; i < 3; ++i) {
3741 io_mem_write[io_index][i]
3742 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3743 }
a4193c8a 3744 io_mem_opaque[io_index] = opaque;
f6405247 3745
dd310534
AG
3746 switch (endian) {
3747 case DEVICE_BIG_ENDIAN:
3748#ifndef TARGET_WORDS_BIGENDIAN
3749 swapendian_init(io_index);
3750#endif
3751 break;
3752 case DEVICE_LITTLE_ENDIAN:
3753#ifdef TARGET_WORDS_BIGENDIAN
3754 swapendian_init(io_index);
3755#endif
3756 break;
3757 case DEVICE_NATIVE_ENDIAN:
3758 default:
3759 break;
3760 }
3761
f6405247 3762 return (io_index << IO_MEM_SHIFT);
33417e70 3763}
61382a50 3764
d60efc6b
BS
3765int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3766 CPUWriteMemoryFunc * const *mem_write,
dd310534 3767 void *opaque, enum device_endian endian)
1eed09cb 3768{
2507c12a 3769 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
1eed09cb
AK
3770}
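
A usage sketch under the assumption of hypothetical example_* handlers: a device registers its byte/word/long callbacks here and then maps the returned token over a physical range with cpu_register_physical_memory().

static CPUReadMemoryFunc * const example_read[3] = {
    example_readb, example_readw, example_readl,     /* hypothetical handlers */
};
static CPUWriteMemoryFunc * const example_write[3] = {
    example_writeb, example_writew, example_writel,
};

static void example_mmio_map(void *opaque)
{
    int io = cpu_register_io_memory(example_read, example_write, opaque,
                                    DEVICE_NATIVE_ENDIAN);
    /* base address and size below are purely illustrative */
    cpu_register_physical_memory(0xf0000000, 0x1000, io);
}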
3771
88715657
AL
3772void cpu_unregister_io_memory(int io_table_address)
3773{
3774 int i;
3775 int io_index = io_table_address >> IO_MEM_SHIFT;
3776
dd310534
AG
3777 swapendian_del(io_index);
3778
88715657
AL
3779 for (i=0;i < 3; i++) {
3780 io_mem_read[io_index][i] = unassigned_mem_read[i];
3781 io_mem_write[io_index][i] = unassigned_mem_write[i];
3782 }
3783 io_mem_opaque[io_index] = NULL;
3784 io_mem_used[io_index] = 0;
3785}
3786
e9179ce1
AK
3787static void io_mem_init(void)
3788{
3789 int i;
3790
2507c12a
AG
3791 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3792 unassigned_mem_write, NULL,
3793 DEVICE_NATIVE_ENDIAN);
3794 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3795 unassigned_mem_write, NULL,
3796 DEVICE_NATIVE_ENDIAN);
3797 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3798 notdirty_mem_write, NULL,
3799 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3800 for (i=0; i<5; i++)
3801 io_mem_used[i] = 1;
3802
3803 io_mem_watch = cpu_register_io_memory(watch_mem_read,
2507c12a
AG
3804 watch_mem_write, NULL,
3805 DEVICE_NATIVE_ENDIAN);
e9179ce1
AK
3806}
3807
e2eef170
PB
3808#endif /* !defined(CONFIG_USER_ONLY) */
3809
13eb76e0
FB
3810/* physical memory access (slow version, mainly for debug) */
3811#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3812int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3813 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3814{
3815 int l, flags;
3816 target_ulong page;
53a5960a 3817 void * p;
13eb76e0
FB
3818
3819 while (len > 0) {
3820 page = addr & TARGET_PAGE_MASK;
3821 l = (page + TARGET_PAGE_SIZE) - addr;
3822 if (l > len)
3823 l = len;
3824 flags = page_get_flags(page);
3825 if (!(flags & PAGE_VALID))
a68fe89c 3826 return -1;
13eb76e0
FB
3827 if (is_write) {
3828 if (!(flags & PAGE_WRITE))
a68fe89c 3829 return -1;
579a97f7 3830 /* XXX: this code should not depend on lock_user */
72fb7daa 3831 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3832 return -1;
72fb7daa
AJ
3833 memcpy(p, buf, l);
3834 unlock_user(p, addr, l);
13eb76e0
FB
3835 } else {
3836 if (!(flags & PAGE_READ))
a68fe89c 3837 return -1;
579a97f7 3838 /* XXX: this code should not depend on lock_user */
72fb7daa 3839 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3840 return -1;
72fb7daa 3841 memcpy(buf, p, l);
5b257578 3842 unlock_user(p, addr, 0);
13eb76e0
FB
3843 }
3844 len -= l;
3845 buf += l;
3846 addr += l;
3847 }
a68fe89c 3848 return 0;
13eb76e0 3849}
8df1cd07 3850
13eb76e0 3851#else
c227f099 3852void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3853 int len, int is_write)
3854{
3855 int l, io_index;
3856 uint8_t *ptr;
3857 uint32_t val;
c227f099 3858 target_phys_addr_t page;
2e12669a 3859 unsigned long pd;
92e873b9 3860 PhysPageDesc *p;
3b46e624 3861
13eb76e0
FB
3862 while (len > 0) {
3863 page = addr & TARGET_PAGE_MASK;
3864 l = (page + TARGET_PAGE_SIZE) - addr;
3865 if (l > len)
3866 l = len;
92e873b9 3867 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3868 if (!p) {
3869 pd = IO_MEM_UNASSIGNED;
3870 } else {
3871 pd = p->phys_offset;
3872 }
3b46e624 3873
13eb76e0 3874 if (is_write) {
3a7d929e 3875 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3876 target_phys_addr_t addr1 = addr;
13eb76e0 3877 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3878 if (p)
6c2934db 3879 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3880 /* XXX: could force cpu_single_env to NULL to avoid
3881 potential bugs */
6c2934db 3882 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3883 /* 32 bit write access */
c27004ec 3884 val = ldl_p(buf);
6c2934db 3885 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3886 l = 4;
6c2934db 3887 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3888 /* 16 bit write access */
c27004ec 3889 val = lduw_p(buf);
6c2934db 3890 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3891 l = 2;
3892 } else {
1c213d19 3893 /* 8 bit write access */
c27004ec 3894 val = ldub_p(buf);
6c2934db 3895 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3896 l = 1;
3897 }
3898 } else {
b448f2f3
FB
3899 unsigned long addr1;
3900 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3901 /* RAM case */
5579c7f3 3902 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3903 memcpy(ptr, buf, l);
3a7d929e
FB
3904 if (!cpu_physical_memory_is_dirty(addr1)) {
3905 /* invalidate code */
3906 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3907 /* set dirty bit */
f7c11b53
YT
3908 cpu_physical_memory_set_dirty_flags(
3909 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3910 }
050a0ddf 3911 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3912 }
3913 } else {
5fafdf24 3914 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3915 !(pd & IO_MEM_ROMD)) {
c227f099 3916 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3917 /* I/O case */
3918 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3919 if (p)
6c2934db
AJ
3920 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3921 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3922 /* 32 bit read access */
6c2934db 3923 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3924 stl_p(buf, val);
13eb76e0 3925 l = 4;
6c2934db 3926 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3927 /* 16 bit read access */
6c2934db 3928 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3929 stw_p(buf, val);
13eb76e0
FB
3930 l = 2;
3931 } else {
1c213d19 3932 /* 8 bit read access */
6c2934db 3933 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3934 stb_p(buf, val);
13eb76e0
FB
3935 l = 1;
3936 }
3937 } else {
3938 /* RAM case */
050a0ddf
AP
3939 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
3940 memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
3941 qemu_put_ram_ptr(ptr);
13eb76e0
FB
3942 }
3943 }
3944 len -= l;
3945 buf += l;
3946 addr += l;
3947 }
3948}
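
Callers usually go through the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers around this function (as the bounce-buffer code below already does); a minimal sketch, where gpa is some guest physical address:

uint8_t buf[16];

cpu_physical_memory_read(gpa, buf, sizeof(buf));    /* is_write == 0 */
/* ... inspect or modify buf ... */
cpu_physical_memory_write(gpa, buf, sizeof(buf));   /* is_write == 1 */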
8df1cd07 3949
d0ecd2aa 3950/* used for ROM loading : can write in RAM and ROM */
c227f099 3951void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3952 const uint8_t *buf, int len)
3953{
3954 int l;
3955 uint8_t *ptr;
c227f099 3956 target_phys_addr_t page;
d0ecd2aa
FB
3957 unsigned long pd;
3958 PhysPageDesc *p;
3b46e624 3959
d0ecd2aa
FB
3960 while (len > 0) {
3961 page = addr & TARGET_PAGE_MASK;
3962 l = (page + TARGET_PAGE_SIZE) - addr;
3963 if (l > len)
3964 l = len;
3965 p = phys_page_find(page >> TARGET_PAGE_BITS);
3966 if (!p) {
3967 pd = IO_MEM_UNASSIGNED;
3968 } else {
3969 pd = p->phys_offset;
3970 }
3b46e624 3971
d0ecd2aa 3972 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3973 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3974 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3975 /* do nothing */
3976 } else {
3977 unsigned long addr1;
3978 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3979 /* ROM/RAM case */
5579c7f3 3980 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3981 memcpy(ptr, buf, l);
050a0ddf 3982 qemu_put_ram_ptr(ptr);
d0ecd2aa
FB
3983 }
3984 len -= l;
3985 buf += l;
3986 addr += l;
3987 }
3988}
3989
6d16c2f8
AL
3990typedef struct {
3991 void *buffer;
c227f099
AL
3992 target_phys_addr_t addr;
3993 target_phys_addr_t len;
6d16c2f8
AL
3994} BounceBuffer;
3995
3996static BounceBuffer bounce;
3997
ba223c29
AL
3998typedef struct MapClient {
3999 void *opaque;
4000 void (*callback)(void *opaque);
72cf2d4f 4001 QLIST_ENTRY(MapClient) link;
ba223c29
AL
4002} MapClient;
4003
72cf2d4f
BS
4004static QLIST_HEAD(map_client_list, MapClient) map_client_list
4005 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
4006
4007void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4008{
4009 MapClient *client = qemu_malloc(sizeof(*client));
4010
4011 client->opaque = opaque;
4012 client->callback = callback;
72cf2d4f 4013 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
4014 return client;
4015}
4016
4017void cpu_unregister_map_client(void *_client)
4018{
4019 MapClient *client = (MapClient *)_client;
4020
72cf2d4f 4021 QLIST_REMOVE(client, link);
34d5e948 4022 qemu_free(client);
ba223c29
AL
4023}
4024
4025static void cpu_notify_map_clients(void)
4026{
4027 MapClient *client;
4028
72cf2d4f
BS
4029 while (!QLIST_EMPTY(&map_client_list)) {
4030 client = QLIST_FIRST(&map_client_list);
ba223c29 4031 client->callback(client->opaque);
34d5e948 4032 cpu_unregister_map_client(client);
ba223c29
AL
4033 }
4034}
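
A hedged sketch of how these map-client hooks are meant to be used: when cpu_physical_memory_map() below fails because the single bounce buffer is busy, the caller registers a callback and retries from it once clients are notified. ExampleDev and example_start_transfer are hypothetical names.

typedef struct ExampleDev {
    target_phys_addr_t gpa;
    target_phys_addr_t len;
} ExampleDev;

static void example_start_transfer(ExampleDev *dev);

static void example_retry(void *opaque)
{
    example_start_transfer(opaque);      /* re-attempt the deferred mapping */
}

static void example_start_transfer(ExampleDev *dev)
{
    target_phys_addr_t len = dev->len;
    void *host = cpu_physical_memory_map(dev->gpa, &len, 1);

    if (!host) {
        /* bounce buffer in use: ask to be called back and bail out */
        cpu_register_map_client(dev, example_retry);
        return;
    }
    /* ... fill host[0..len), then release the mapping ... */
    cpu_physical_memory_unmap(host, len, 1, len);
}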
4035
6d16c2f8
AL
4036/* Map a physical memory region into a host virtual address.
4037 * May map a subset of the requested range, given by and returned in *plen.
4038 * May return NULL if resources needed to perform the mapping are exhausted.
4039 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
4040 * Use cpu_register_map_client() to know when retrying the map operation is
4041 * likely to succeed.
6d16c2f8 4042 */
c227f099
AL
4043void *cpu_physical_memory_map(target_phys_addr_t addr,
4044 target_phys_addr_t *plen,
6d16c2f8
AL
4045 int is_write)
4046{
c227f099 4047 target_phys_addr_t len = *plen;
38bee5dc 4048 target_phys_addr_t todo = 0;
6d16c2f8 4049 int l;
c227f099 4050 target_phys_addr_t page;
6d16c2f8
AL
4051 unsigned long pd;
4052 PhysPageDesc *p;
38bee5dc 4053 target_phys_addr_t addr1 = addr;
6d16c2f8
AL
4054
4055 while (len > 0) {
4056 page = addr & TARGET_PAGE_MASK;
4057 l = (page + TARGET_PAGE_SIZE) - addr;
4058 if (l > len)
4059 l = len;
4060 p = phys_page_find(page >> TARGET_PAGE_BITS);
4061 if (!p) {
4062 pd = IO_MEM_UNASSIGNED;
4063 } else {
4064 pd = p->phys_offset;
4065 }
4066
4067 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
38bee5dc 4068 if (todo || bounce.buffer) {
6d16c2f8
AL
4069 break;
4070 }
4071 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
4072 bounce.addr = addr;
4073 bounce.len = l;
4074 if (!is_write) {
54f7b4a3 4075 cpu_physical_memory_read(addr, bounce.buffer, l);
6d16c2f8 4076 }
38bee5dc
SS
4077
4078 *plen = l;
4079 return bounce.buffer;
6d16c2f8
AL
4080 }
4081
4082 len -= l;
4083 addr += l;
38bee5dc 4084 todo += l;
6d16c2f8 4085 }
38bee5dc
SS
4086 *plen = todo;
4087 return qemu_ram_ptr_length(addr1, plen);
6d16c2f8
AL
4088}
4089
4090/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
4091 * Will also mark the memory as dirty if is_write == 1. access_len gives
4092 * the amount of memory that was actually read or written by the caller.
4093 */
c227f099
AL
4094void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
4095 int is_write, target_phys_addr_t access_len)
6d16c2f8
AL
4096{
4097 if (buffer != bounce.buffer) {
4098 if (is_write) {
e890261f 4099 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
6d16c2f8
AL
4100 while (access_len) {
4101 unsigned l;
4102 l = TARGET_PAGE_SIZE;
4103 if (l > access_len)
4104 l = access_len;
4105 if (!cpu_physical_memory_is_dirty(addr1)) {
4106 /* invalidate code */
4107 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
4108 /* set dirty bit */
f7c11b53
YT
4109 cpu_physical_memory_set_dirty_flags(
4110 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8
AL
4111 }
4112 addr1 += l;
4113 access_len -= l;
4114 }
4115 }
050a0ddf 4116 if (xen_mapcache_enabled()) {
712c2b41 4117 qemu_invalidate_entry(buffer);
050a0ddf 4118 }
6d16c2f8
AL
4119 return;
4120 }
4121 if (is_write) {
4122 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
4123 }
f8a83245 4124 qemu_vfree(bounce.buffer);
6d16c2f8 4125 bounce.buffer = NULL;
ba223c29 4126 cpu_notify_map_clients();
6d16c2f8 4127}
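
Because *plen may come back shorter than requested (at most one page when the bounce buffer is used), callers generally loop over the map/unmap pair; a sketch that zeroes a guest-physical range (the helper name is illustrative):

static void example_zero_region(target_phys_addr_t gpa, target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t len = size;
        void *p = cpu_physical_memory_map(gpa, &len, 1 /* is_write */);

        if (!p) {
            break;                       /* mapping resources exhausted */
        }
        memset(p, 0, len);               /* only 'len' bytes are mapped */
        cpu_physical_memory_unmap(p, len, 1, len);
        gpa += len;
        size -= len;
    }
}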
d0ecd2aa 4128
8df1cd07 4129/* warning: addr must be aligned */
1e78bcc1
AG
4130static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4131 enum device_endian endian)
8df1cd07
FB
4132{
4133 int io_index;
4134 uint8_t *ptr;
4135 uint32_t val;
4136 unsigned long pd;
4137 PhysPageDesc *p;
4138
4139 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4140 if (!p) {
4141 pd = IO_MEM_UNASSIGNED;
4142 } else {
4143 pd = p->phys_offset;
4144 }
3b46e624 4145
5fafdf24 4146 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 4147 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
4148 /* I/O case */
4149 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4150 if (p)
4151 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 4152 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4153#if defined(TARGET_WORDS_BIGENDIAN)
4154 if (endian == DEVICE_LITTLE_ENDIAN) {
4155 val = bswap32(val);
4156 }
4157#else
4158 if (endian == DEVICE_BIG_ENDIAN) {
4159 val = bswap32(val);
4160 }
4161#endif
8df1cd07
FB
4162 } else {
4163 /* RAM case */
5579c7f3 4164 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 4165 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4166 switch (endian) {
4167 case DEVICE_LITTLE_ENDIAN:
4168 val = ldl_le_p(ptr);
4169 break;
4170 case DEVICE_BIG_ENDIAN:
4171 val = ldl_be_p(ptr);
4172 break;
4173 default:
4174 val = ldl_p(ptr);
4175 break;
4176 }
8df1cd07
FB
4177 }
4178 return val;
4179}
4180
1e78bcc1
AG
4181uint32_t ldl_phys(target_phys_addr_t addr)
4182{
4183 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4184}
4185
4186uint32_t ldl_le_phys(target_phys_addr_t addr)
4187{
4188 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4189}
4190
4191uint32_t ldl_be_phys(target_phys_addr_t addr)
4192{
4193 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
4194}
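
These fixed-endian accessors let a device model read guest structures with a defined byte order on any target; a small sketch (the descriptor layout and offset are made up):

/* Read a 32-bit length field that the guest stores little-endian at
   desc_gpa + 8; behaves the same on little- and big-endian targets. */
static uint32_t example_desc_len(target_phys_addr_t desc_gpa)
{
    return ldl_le_phys(desc_gpa + 8);
}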
4195
84b7b8e7 4196/* warning: addr must be aligned */
1e78bcc1
AG
4197static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
4198 enum device_endian endian)
84b7b8e7
FB
4199{
4200 int io_index;
4201 uint8_t *ptr;
4202 uint64_t val;
4203 unsigned long pd;
4204 PhysPageDesc *p;
4205
4206 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4207 if (!p) {
4208 pd = IO_MEM_UNASSIGNED;
4209 } else {
4210 pd = p->phys_offset;
4211 }
3b46e624 4212
2a4188a3
FB
4213 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4214 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
4215 /* I/O case */
4216 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4217 if (p)
4218 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4219
4220 /* XXX This is broken when device endian != cpu endian.
 4221 Fix this and add a check of the "endian" argument. */
84b7b8e7
FB
4222#ifdef TARGET_WORDS_BIGENDIAN
4223 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
4224 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
4225#else
4226 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
4227 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
4228#endif
4229 } else {
4230 /* RAM case */
5579c7f3 4231 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 4232 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4233 switch (endian) {
4234 case DEVICE_LITTLE_ENDIAN:
4235 val = ldq_le_p(ptr);
4236 break;
4237 case DEVICE_BIG_ENDIAN:
4238 val = ldq_be_p(ptr);
4239 break;
4240 default:
4241 val = ldq_p(ptr);
4242 break;
4243 }
84b7b8e7
FB
4244 }
4245 return val;
4246}
4247
1e78bcc1
AG
4248uint64_t ldq_phys(target_phys_addr_t addr)
4249{
4250 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4251}
4252
4253uint64_t ldq_le_phys(target_phys_addr_t addr)
4254{
4255 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4256}
4257
4258uint64_t ldq_be_phys(target_phys_addr_t addr)
4259{
4260 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
4261}
4262
aab33094 4263/* XXX: optimize */
c227f099 4264uint32_t ldub_phys(target_phys_addr_t addr)
aab33094
FB
4265{
4266 uint8_t val;
4267 cpu_physical_memory_read(addr, &val, 1);
4268 return val;
4269}
4270
733f0b02 4271/* warning: addr must be aligned */
1e78bcc1
AG
4272static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
4273 enum device_endian endian)
aab33094 4274{
733f0b02
MT
4275 int io_index;
4276 uint8_t *ptr;
4277 uint64_t val;
4278 unsigned long pd;
4279 PhysPageDesc *p;
4280
4281 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4282 if (!p) {
4283 pd = IO_MEM_UNASSIGNED;
4284 } else {
4285 pd = p->phys_offset;
4286 }
4287
4288 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
4289 !(pd & IO_MEM_ROMD)) {
4290 /* I/O case */
4291 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4292 if (p)
4293 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
4294 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
1e78bcc1
AG
4295#if defined(TARGET_WORDS_BIGENDIAN)
4296 if (endian == DEVICE_LITTLE_ENDIAN) {
4297 val = bswap16(val);
4298 }
4299#else
4300 if (endian == DEVICE_BIG_ENDIAN) {
4301 val = bswap16(val);
4302 }
4303#endif
733f0b02
MT
4304 } else {
4305 /* RAM case */
4306 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
4307 (addr & ~TARGET_PAGE_MASK);
1e78bcc1
AG
4308 switch (endian) {
4309 case DEVICE_LITTLE_ENDIAN:
4310 val = lduw_le_p(ptr);
4311 break;
4312 case DEVICE_BIG_ENDIAN:
4313 val = lduw_be_p(ptr);
4314 break;
4315 default:
4316 val = lduw_p(ptr);
4317 break;
4318 }
733f0b02
MT
4319 }
4320 return val;
aab33094
FB
4321}
4322
1e78bcc1
AG
4323uint32_t lduw_phys(target_phys_addr_t addr)
4324{
4325 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
4326}
4327
4328uint32_t lduw_le_phys(target_phys_addr_t addr)
4329{
4330 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
4331}
4332
4333uint32_t lduw_be_phys(target_phys_addr_t addr)
4334{
4335 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
4336}
4337
8df1cd07
FB
4338/* warning: addr must be aligned. The ram page is not marked as dirty
4339 and the code inside is not invalidated. It is useful if the dirty
4340 bits are used to track modified PTEs */
c227f099 4341void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07
FB
4342{
4343 int io_index;
4344 uint8_t *ptr;
4345 unsigned long pd;
4346 PhysPageDesc *p;
4347
4348 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4349 if (!p) {
4350 pd = IO_MEM_UNASSIGNED;
4351 } else {
4352 pd = p->phys_offset;
4353 }
3b46e624 4354
3a7d929e 4355 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4356 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4357 if (p)
4358 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
4359 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4360 } else {
74576198 4361 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 4362 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 4363 stl_p(ptr, val);
74576198
AL
4364
4365 if (unlikely(in_migration)) {
4366 if (!cpu_physical_memory_is_dirty(addr1)) {
4367 /* invalidate code */
4368 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4369 /* set dirty bit */
f7c11b53
YT
4370 cpu_physical_memory_set_dirty_flags(
4371 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198
AL
4372 }
4373 }
8df1cd07
FB
4374 }
4375}
4376
c227f099 4377void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef
JM
4378{
4379 int io_index;
4380 uint8_t *ptr;
4381 unsigned long pd;
4382 PhysPageDesc *p;
4383
4384 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4385 if (!p) {
4386 pd = IO_MEM_UNASSIGNED;
4387 } else {
4388 pd = p->phys_offset;
4389 }
3b46e624 4390
bc98a7ef
JM
4391 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4392 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4393 if (p)
4394 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
4395#ifdef TARGET_WORDS_BIGENDIAN
4396 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
4397 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
4398#else
4399 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4400 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
4401#endif
4402 } else {
5579c7f3 4403 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
4404 (addr & ~TARGET_PAGE_MASK);
4405 stq_p(ptr, val);
4406 }
4407}
4408
8df1cd07 4409/* warning: addr must be aligned */
1e78bcc1
AG
4410static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
4411 enum device_endian endian)
8df1cd07
FB
4412{
4413 int io_index;
4414 uint8_t *ptr;
4415 unsigned long pd;
4416 PhysPageDesc *p;
4417
4418 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4419 if (!p) {
4420 pd = IO_MEM_UNASSIGNED;
4421 } else {
4422 pd = p->phys_offset;
4423 }
3b46e624 4424
3a7d929e 4425 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 4426 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
4427 if (p)
4428 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4429#if defined(TARGET_WORDS_BIGENDIAN)
4430 if (endian == DEVICE_LITTLE_ENDIAN) {
4431 val = bswap32(val);
4432 }
4433#else
4434 if (endian == DEVICE_BIG_ENDIAN) {
4435 val = bswap32(val);
4436 }
4437#endif
8df1cd07
FB
4438 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
4439 } else {
4440 unsigned long addr1;
4441 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4442 /* RAM case */
5579c7f3 4443 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4444 switch (endian) {
4445 case DEVICE_LITTLE_ENDIAN:
4446 stl_le_p(ptr, val);
4447 break;
4448 case DEVICE_BIG_ENDIAN:
4449 stl_be_p(ptr, val);
4450 break;
4451 default:
4452 stl_p(ptr, val);
4453 break;
4454 }
3a7d929e
FB
4455 if (!cpu_physical_memory_is_dirty(addr1)) {
4456 /* invalidate code */
4457 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
4458 /* set dirty bit */
f7c11b53
YT
4459 cpu_physical_memory_set_dirty_flags(addr1,
4460 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 4461 }
8df1cd07
FB
4462 }
4463}
4464
1e78bcc1
AG
4465void stl_phys(target_phys_addr_t addr, uint32_t val)
4466{
4467 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4468}
4469
4470void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4471{
4472 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4473}
4474
4475void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4476{
4477 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4478}
4479
aab33094 4480/* XXX: optimize */
c227f099 4481void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094
FB
4482{
4483 uint8_t v = val;
4484 cpu_physical_memory_write(addr, &v, 1);
4485}
4486
733f0b02 4487/* warning: addr must be aligned */
1e78bcc1
AG
4488static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
4489 enum device_endian endian)
aab33094 4490{
733f0b02
MT
4491 int io_index;
4492 uint8_t *ptr;
4493 unsigned long pd;
4494 PhysPageDesc *p;
4495
4496 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4497 if (!p) {
4498 pd = IO_MEM_UNASSIGNED;
4499 } else {
4500 pd = p->phys_offset;
4501 }
4502
4503 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
4504 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
4505 if (p)
4506 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
1e78bcc1
AG
4507#if defined(TARGET_WORDS_BIGENDIAN)
4508 if (endian == DEVICE_LITTLE_ENDIAN) {
4509 val = bswap16(val);
4510 }
4511#else
4512 if (endian == DEVICE_BIG_ENDIAN) {
4513 val = bswap16(val);
4514 }
4515#endif
733f0b02
MT
4516 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
4517 } else {
4518 unsigned long addr1;
4519 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
4520 /* RAM case */
4521 ptr = qemu_get_ram_ptr(addr1);
1e78bcc1
AG
4522 switch (endian) {
4523 case DEVICE_LITTLE_ENDIAN:
4524 stw_le_p(ptr, val);
4525 break;
4526 case DEVICE_BIG_ENDIAN:
4527 stw_be_p(ptr, val);
4528 break;
4529 default:
4530 stw_p(ptr, val);
4531 break;
4532 }
733f0b02
MT
4533 if (!cpu_physical_memory_is_dirty(addr1)) {
4534 /* invalidate code */
4535 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4536 /* set dirty bit */
4537 cpu_physical_memory_set_dirty_flags(addr1,
4538 (0xff & ~CODE_DIRTY_FLAG));
4539 }
4540 }
aab33094
FB
4541}
4542
1e78bcc1
AG
4543void stw_phys(target_phys_addr_t addr, uint32_t val)
4544{
4545 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4546}
4547
4548void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4549{
4550 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4551}
4552
4553void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4554{
4555 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4556}
4557
aab33094 4558/* XXX: optimize */
c227f099 4559void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094
FB
4560{
4561 val = tswap64(val);
71d2b725 4562 cpu_physical_memory_write(addr, &val, 8);
aab33094
FB
4563}
4564
1e78bcc1
AG
4565void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4566{
4567 val = cpu_to_le64(val);
4568 cpu_physical_memory_write(addr, &val, 8);
4569}
4570
4571void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4572{
4573 val = cpu_to_be64(val);
4574 cpu_physical_memory_write(addr, &val, 8);
4575}
4576
5e2972fd 4577/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 4578int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 4579 uint8_t *buf, int len, int is_write)
13eb76e0
FB
4580{
4581 int l;
c227f099 4582 target_phys_addr_t phys_addr;
9b3c35e0 4583 target_ulong page;
13eb76e0
FB
4584
4585 while (len > 0) {
4586 page = addr & TARGET_PAGE_MASK;
4587 phys_addr = cpu_get_phys_page_debug(env, page);
4588 /* if no physical page mapped, return an error */
4589 if (phys_addr == -1)
4590 return -1;
4591 l = (page + TARGET_PAGE_SIZE) - addr;
4592 if (l > len)
4593 l = len;
5e2972fd 4594 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd
AL
4595 if (is_write)
4596 cpu_physical_memory_write_rom(phys_addr, buf, l);
4597 else
5e2972fd 4598 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0
FB
4599 len -= l;
4600 buf += l;
4601 addr += l;
4602 }
4603 return 0;
4604}
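
This is the routine a debugger front end (e.g. the gdb stub) relies on to access guest virtual memory; a minimal sketch, where env and vaddr are assumed to be in scope:

uint8_t insn[4];

if (cpu_memory_rw_debug(env, vaddr, insn, sizeof(insn), 0) < 0) {
    /* no physical page is mapped at that virtual address */
}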
a68fe89c 4605#endif
13eb76e0 4606
2e70f6ef
PB
4607/* in deterministic execution mode, instructions that perform device I/O
4608 must be at the end of the TB */
4609void cpu_io_recompile(CPUState *env, void *retaddr)
4610{
4611 TranslationBlock *tb;
4612 uint32_t n, cflags;
4613 target_ulong pc, cs_base;
4614 uint64_t flags;
4615
4616 tb = tb_find_pc((unsigned long)retaddr);
4617 if (!tb) {
4618 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
4619 retaddr);
4620 }
4621 n = env->icount_decr.u16.low + tb->icount;
618ba8e6 4622 cpu_restore_state(tb, env, (unsigned long)retaddr);
2e70f6ef 4623 /* Calculate how many instructions had been executed before the fault
bf20dc07 4624 occurred. */
2e70f6ef
PB
4625 n = n - env->icount_decr.u16.low;
4626 /* Generate a new TB ending on the I/O insn. */
4627 n++;
4628 /* On MIPS and SH, delay slot instructions can only be restarted if
4629 they were already the first instruction in the TB. If this is not
bf20dc07 4630 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
4631 branch. */
4632#if defined(TARGET_MIPS)
4633 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4634 env->active_tc.PC -= 4;
4635 env->icount_decr.u16.low++;
4636 env->hflags &= ~MIPS_HFLAG_BMASK;
4637 }
4638#elif defined(TARGET_SH4)
4639 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4640 && n > 1) {
4641 env->pc -= 2;
4642 env->icount_decr.u16.low++;
4643 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4644 }
4645#endif
4646 /* This should never happen. */
4647 if (n > CF_COUNT_MASK)
4648 cpu_abort(env, "TB too big during recompile");
4649
4650 cflags = n | CF_LAST_IO;
4651 pc = tb->pc;
4652 cs_base = tb->cs_base;
4653 flags = tb->flags;
4654 tb_phys_invalidate(tb, -1);
4655 /* FIXME: In theory this could raise an exception. In practice
4656 we have already translated the block once so it's probably ok. */
4657 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4658 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
4659 the first in the TB) then we end up generating a whole new TB and
4660 repeating the fault, which is horribly inefficient.
4661 Better would be to execute just this insn uncached, or generate a
4662 second new TB. */
4663 cpu_resume_from_signal(env, NULL);
4664}
4665
b3755a91
PB
4666#if !defined(CONFIG_USER_ONLY)
4667
055403b2 4668void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
e3db7226
FB
4669{
4670 int i, target_code_size, max_target_code_size;
4671 int direct_jmp_count, direct_jmp2_count, cross_page;
4672 TranslationBlock *tb;
3b46e624 4673
e3db7226
FB
4674 target_code_size = 0;
4675 max_target_code_size = 0;
4676 cross_page = 0;
4677 direct_jmp_count = 0;
4678 direct_jmp2_count = 0;
4679 for(i = 0; i < nb_tbs; i++) {
4680 tb = &tbs[i];
4681 target_code_size += tb->size;
4682 if (tb->size > max_target_code_size)
4683 max_target_code_size = tb->size;
4684 if (tb->page_addr[1] != -1)
4685 cross_page++;
4686 if (tb->tb_next_offset[0] != 0xffff) {
4687 direct_jmp_count++;
4688 if (tb->tb_next_offset[1] != 0xffff) {
4689 direct_jmp2_count++;
4690 }
4691 }
4692 }
4693 /* XXX: avoid using doubles ? */
57fec1fe 4694 cpu_fprintf(f, "Translation buffer state:\n");
055403b2 4695 cpu_fprintf(f, "gen code size %td/%ld\n",
26a5f13b
FB
4696 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4697 cpu_fprintf(f, "TB count %d/%d\n",
4698 nb_tbs, code_gen_max_blocks);
5fafdf24 4699 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
4700 nb_tbs ? target_code_size / nb_tbs : 0,
4701 max_target_code_size);
055403b2 4702 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
4703 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4704 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
4705 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4706 cross_page,
e3db7226
FB
4707 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4708 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4709 direct_jmp_count,
e3db7226
FB
4710 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4711 direct_jmp2_count,
4712 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4713 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
4714 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4715 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4716 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4717 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
4718}
4719
61382a50
FB
4720#define MMUSUFFIX _cmmu
4721#define GETPC() NULL
4722#define env cpu_single_env
b769d8fe 4723#define SOFTMMU_CODE_ACCESS
61382a50
FB
4724
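/* The four includes below instantiate the code-fetch ("_cmmu") variants of
   the softmmu load helpers for 1-, 2-, 4- and 8-byte accesses (SHIFT 0..3);
   SOFTMMU_CODE_ACCESS selects the code-read path and 'env' is temporarily
   aliased to cpu_single_env for their benefit. */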
4725#define SHIFT 0
4726#include "softmmu_template.h"
4727
4728#define SHIFT 1
4729#include "softmmu_template.h"
4730
4731#define SHIFT 2
4732#include "softmmu_template.h"
4733
4734#define SHIFT 3
4735#include "softmmu_template.h"
4736
4737#undef env
4738
4739#endif