/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif
#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
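/* Illustration: for a target with a 32-bit virtual address space and
   TARGET_PAGE_BITS == 12, the page index has 20 bits.  V_L1_BITS_REM is
   20 % 10 == 0, which is < 4, so V_L1_BITS becomes 10 and V_L1_SHIFT is
   32 - 12 - 10 = 10: the L1 index is bits [19..10] of the page index and
   a single bottom level consumes the remaining 10 bits. */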
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
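/* With the two-level layout illustrated above, page_find_alloc(index, 1)
   walks l1_map using the top V_L1_BITS of the page index, allocating
   intermediate tables on demand, and returns the PageDesc slot selected
   by the low L2_BITS; page_find() is the non-allocating lookup used on
   fast paths. */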
#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif
CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
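/* Note on the encoding used above and below: pointers stored in the
   per-page TB lists carry a tag in their two low bits.  Tags 0 and 1
   select which of the TB's page_next[] slots the link lives in (a TB may
   span two guest pages); tag 2 marks the list head sentinel in the jump
   lists.  Hence the recurring "(long)tb & 3" / "& ~3" masking. */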
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
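/* Worked example: set_bits(tab, 6, 4) marks bits 6..9.  start and end
   fall in different bytes, so the first byte is ORed with
   0xff << 6 == 0xc0 (bits 6-7), no full 0xff bytes follow, and the last
   byte is ORed with ~(0xff << 2) == 0x03 (bits 8-9). */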
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
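/* The bitmap built above has one bit per byte of the guest page that is
   covered by translated code.  tb_invalidate_phys_page_fast() consults
   it so that a guest write to a code page only triggers the expensive
   invalidation path when the written bytes actually overlap a
   translation. */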
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
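/* The binary search above relies on tbs[] being filled sequentially:
   each TB's tc_ptr is carved out of code_gen_buffer in allocation order,
   so tc_ptr is strictly increasing with the array index. */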
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */
#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
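/* Usage sketch (illustrative, not a call site in this file): a debugger
   stub typically pairs these helpers, e.g.

       CPUBreakpoint *bp;
       cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
       ...
       cpu_breakpoint_remove_by_ref(env, bp);

   BP_GDB breakpoints are kept at the head of the list so they take
   priority over CPU-internal (BP_CPU) ones. */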
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1675
c7cd6a37 1676const CPULogItem cpu_log_items[] = {
5fafdf24 1677 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1678 "show generated host assembly code for each compiled TB" },
1679 { CPU_LOG_TB_IN_ASM, "in_asm",
1680 "show target assembly code for each compiled TB" },
5fafdf24 1681 { CPU_LOG_TB_OP, "op",
57fec1fe 1682 "show micro ops for each compiled TB" },
f193c797 1683 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1684 "show micro ops "
1685#ifdef TARGET_I386
1686 "before eflags optimization and "
f193c797 1687#endif
e01a1157 1688 "after liveness analysis" },
f193c797
FB
1689 { CPU_LOG_INT, "int",
1690 "show interrupts/exceptions in short format" },
1691 { CPU_LOG_EXEC, "exec",
1692 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1693 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1694 "show CPU state before block translation" },
f193c797
FB
1695#ifdef TARGET_I386
1696 { CPU_LOG_PCALL, "pcall",
1697 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1698 { CPU_LOG_RESET, "cpu_reset",
1699 "show CPU state before CPU resets" },
f193c797 1700#endif
8e3a9fd2 1701#ifdef DEBUG_IOPORT
fd872598
FB
1702 { CPU_LOG_IOPORT, "ioport",
1703 "show all i/o ports accesses" },
8e3a9fd2 1704#endif
f193c797
FB
1705 { 0, NULL, NULL },
1706};
1707
f6f3fbca
MT
1708#ifndef CONFIG_USER_ONLY
1709static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1710 = QLIST_HEAD_INITIALIZER(memory_client_list);
1711
1712static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1713 ram_addr_t size,
1714 ram_addr_t phys_offset)
1715{
1716 CPUPhysMemoryClient *client;
1717 QLIST_FOREACH(client, &memory_client_list, list) {
1718 client->set_memory(client, start_addr, size, phys_offset);
1719 }
1720}
1721
1722static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1723 target_phys_addr_t end)
1724{
1725 CPUPhysMemoryClient *client;
1726 QLIST_FOREACH(client, &memory_client_list, list) {
1727 int r = client->sync_dirty_bitmap(client, start, end);
1728 if (r < 0)
1729 return r;
1730 }
1731 return 0;
1732}
1733
1734static int cpu_notify_migration_log(int enable)
1735{
1736 CPUPhysMemoryClient *client;
1737 QLIST_FOREACH(client, &memory_client_list, list) {
1738 int r = client->migration_log(client, enable);
1739 if (r < 0)
1740 return r;
1741 }
1742 return 0;
1743}
1744
5cd2c5b6
RH
1745static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1746 int level, void **lp)
f6f3fbca 1747{
5cd2c5b6 1748 int i;
f6f3fbca 1749
5cd2c5b6
RH
1750 if (*lp == NULL) {
1751 return;
1752 }
1753 if (level == 0) {
1754 PhysPageDesc *pd = *lp;
7296abac 1755 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
1756 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1757 client->set_memory(client, pd[i].region_offset,
1758 TARGET_PAGE_SIZE, pd[i].phys_offset);
f6f3fbca 1759 }
5cd2c5b6
RH
1760 }
1761 } else {
1762 void **pp = *lp;
7296abac 1763 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6 1764 phys_page_for_each_1(client, level - 1, pp + i);
f6f3fbca
MT
1765 }
1766 }
1767}
1768
1769static void phys_page_for_each(CPUPhysMemoryClient *client)
1770{
5cd2c5b6
RH
1771 int i;
1772 for (i = 0; i < P_L1_SIZE; ++i) {
1773 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
 1774                              l1_phys_map + i);
f6f3fbca 1775 }
f6f3fbca
MT
1776}
1777
1778void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1779{
1780 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1781 phys_page_for_each(client);
1782}
1783
1784void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1785{
1786 QLIST_REMOVE(client, list);
1787}
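
/* Example (illustrative only, not part of exec.c): a minimal
   CPUPhysMemoryClient that just traces mappings.  The callback
   signatures are inferred from the cpu_notify_* helpers above; the
   struct itself is declared in cpu-common.h. */
static void example_client_set_memory(struct CPUPhysMemoryClient *client,
                                      target_phys_addr_t start_addr,
                                      ram_addr_t size, ram_addr_t phys_offset)
{
    printf("map " TARGET_FMT_plx " size 0x%lx offset 0x%lx\n",
           start_addr, (unsigned long)size, (unsigned long)phys_offset);
}

static int example_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                            target_phys_addr_t start,
                                            target_phys_addr_t end)
{
    return 0;                          /* nothing to synchronize */
}

static int example_client_migration_log(struct CPUPhysMemoryClient *client,
                                        int enable)
{
    return 0;                          /* dirty logging not needed */
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_client_set_memory,
    .sync_dirty_bitmap = example_client_sync_dirty_bitmap,
    .migration_log = example_client_migration_log,
};

/* cpu_register_phys_memory_client(&example_client) would replay every
   already-populated page through example_client_set_memory via
   phys_page_for_each(). */
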
1788#endif
1789
f193c797
FB
1790static int cmp1(const char *s1, int n, const char *s2)
1791{
1792 if (strlen(s2) != n)
1793 return 0;
1794 return memcmp(s1, s2, n) == 0;
1795}
3b46e624 1796
f193c797
FB
 1797/* Takes a comma-separated list of log masks. Returns 0 on error. */
1798int cpu_str_to_log_mask(const char *str)
1799{
c7cd6a37 1800 const CPULogItem *item;
f193c797
FB
1801 int mask;
1802 const char *p, *p1;
1803
1804 p = str;
1805 mask = 0;
1806 for(;;) {
1807 p1 = strchr(p, ',');
1808 if (!p1)
1809 p1 = p + strlen(p);
8e3a9fd2
FB
1810 if(cmp1(p,p1-p,"all")) {
1811 for(item = cpu_log_items; item->mask != 0; item++) {
1812 mask |= item->mask;
1813 }
1814 } else {
f193c797
FB
1815 for(item = cpu_log_items; item->mask != 0; item++) {
1816 if (cmp1(p, p1 - p, item->name))
1817 goto found;
1818 }
1819 return 0;
8e3a9fd2 1820 }
f193c797
FB
1821 found:
1822 mask |= item->mask;
1823 if (*p1 != ',')
1824 break;
1825 p = p1 + 1;
1826 }
1827 return mask;
1828}
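
/* Usage sketch (illustrative): how a "-d" style option string is turned
   into a log mask, mirroring what vl.c does with this function.  Assumes
   cpu_set_log() from the logging code is visible (declared in cpu-all.h
   in this tree). */
static void example_handle_d_option(const char *optarg)
{
    const CPULogItem *item;
    int mask = cpu_str_to_log_mask(optarg);    /* e.g. "in_asm,int" */

    if (!mask) {
        printf("Log items (comma separated):\n");
        for (item = cpu_log_items; item->mask != 0; item++) {
            printf("%-10s %s\n", item->name, item->help);
        }
        exit(1);
    }
    cpu_set_log(mask);
}
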
ea041c0e 1829
7501267e
FB
1830void cpu_abort(CPUState *env, const char *fmt, ...)
1831{
1832 va_list ap;
493ae1f0 1833 va_list ap2;
7501267e
FB
1834
1835 va_start(ap, fmt);
493ae1f0 1836 va_copy(ap2, ap);
7501267e
FB
1837 fprintf(stderr, "qemu: fatal: ");
1838 vfprintf(stderr, fmt, ap);
1839 fprintf(stderr, "\n");
1840#ifdef TARGET_I386
7fe48483
FB
1841 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1842#else
1843 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1844#endif
93fcfe39
AL
1845 if (qemu_log_enabled()) {
1846 qemu_log("qemu: fatal: ");
1847 qemu_log_vprintf(fmt, ap2);
1848 qemu_log("\n");
f9373291 1849#ifdef TARGET_I386
93fcfe39 1850 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1851#else
93fcfe39 1852 log_cpu_state(env, 0);
f9373291 1853#endif
31b1a7b4 1854 qemu_log_flush();
93fcfe39 1855 qemu_log_close();
924edcae 1856 }
493ae1f0 1857 va_end(ap2);
f9373291 1858 va_end(ap);
fd052bf6
RV
1859#if defined(CONFIG_USER_ONLY)
1860 {
1861 struct sigaction act;
1862 sigfillset(&act.sa_mask);
1863 act.sa_handler = SIG_DFL;
1864 sigaction(SIGABRT, &act, NULL);
1865 }
1866#endif
7501267e
FB
1867 abort();
1868}
1869
c5be9f08
TS
1870CPUState *cpu_copy(CPUState *env)
1871{
01ba9816 1872 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1873 CPUState *next_cpu = new_env->next_cpu;
1874 int cpu_index = new_env->cpu_index;
5a38f081
AL
1875#if defined(TARGET_HAS_ICE)
1876 CPUBreakpoint *bp;
1877 CPUWatchpoint *wp;
1878#endif
1879
c5be9f08 1880 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1881
1882 /* Preserve chaining and index. */
c5be9f08
TS
1883 new_env->next_cpu = next_cpu;
1884 new_env->cpu_index = cpu_index;
5a38f081
AL
1885
1886 /* Clone all break/watchpoints.
1887 Note: Once we support ptrace with hw-debug register access, make sure
1888 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
 1889    QTAILQ_INIT(&new_env->breakpoints);
 1890    QTAILQ_INIT(&new_env->watchpoints);
5a38f081 1891#if defined(TARGET_HAS_ICE)
72cf2d4f 1892 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1893 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1894 }
72cf2d4f 1895 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1896 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1897 wp->flags, NULL);
1898 }
1899#endif
1900
c5be9f08
TS
1901 return new_env;
1902}
1903
0124311e
FB
1904#if !defined(CONFIG_USER_ONLY)
1905
5c751e99
EI
1906static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1907{
1908 unsigned int i;
1909
1910 /* Discard jump cache entries for any tb which might potentially
1911 overlap the flushed page. */
1912 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1913 memset (&env->tb_jmp_cache[i], 0,
1914 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1915
1916 i = tb_jmp_cache_hash_page(addr);
1917 memset (&env->tb_jmp_cache[i], 0,
1918 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1919}
1920
08738984
IK
1921static CPUTLBEntry s_cputlb_empty_entry = {
1922 .addr_read = -1,
1923 .addr_write = -1,
1924 .addr_code = -1,
1925 .addend = -1,
1926};
1927
ee8b7021
FB
1928/* NOTE: if flush_global is true, also flush global entries (not
1929 implemented yet) */
1930void tlb_flush(CPUState *env, int flush_global)
33417e70 1931{
33417e70 1932 int i;
0124311e 1933
9fa3e853
FB
1934#if defined(DEBUG_TLB)
1935 printf("tlb_flush:\n");
1936#endif
0124311e
FB
1937 /* must reset current TB so that interrupts cannot modify the
1938 links while we are modifying them */
1939 env->current_tb = NULL;
1940
33417e70 1941 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1942 int mmu_idx;
1943 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1944 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1945 }
33417e70 1946 }
9fa3e853 1947
8a40a180 1948 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1949
d4c430a8
PB
1950 env->tlb_flush_addr = -1;
1951 env->tlb_flush_mask = 0;
e3db7226 1952 tlb_flush_count++;
33417e70
FB
1953}
1954
274da6b2 1955static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1956{
5fafdf24 1957 if (addr == (tlb_entry->addr_read &
84b7b8e7 1958 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1959 addr == (tlb_entry->addr_write &
84b7b8e7 1960 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1961 addr == (tlb_entry->addr_code &
84b7b8e7 1962 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1963 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1964 }
61382a50
FB
1965}
1966
2e12669a 1967void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1968{
8a40a180 1969 int i;
cfde4bd9 1970 int mmu_idx;
0124311e 1971
9fa3e853 1972#if defined(DEBUG_TLB)
108c49b8 1973 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1974#endif
d4c430a8
PB
1975 /* Check if we need to flush due to large pages. */
1976 if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1977#if defined(DEBUG_TLB)
1978 printf("tlb_flush_page: forced full flush ("
1979 TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1980 env->tlb_flush_addr, env->tlb_flush_mask);
1981#endif
1982 tlb_flush(env, 1);
1983 return;
1984 }
0124311e
FB
1985 /* must reset current TB so that interrupts cannot modify the
1986 links while we are modifying them */
1987 env->current_tb = NULL;
61382a50
FB
1988
1989 addr &= TARGET_PAGE_MASK;
1990 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1991 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1992 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1993
5c751e99 1994 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1995}
1996
9fa3e853
FB
1997/* update the TLBs so that writes to code in the virtual page 'addr'
1998 can be detected */
c227f099 1999static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 2000{
5fafdf24 2001 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
2002 ram_addr + TARGET_PAGE_SIZE,
2003 CODE_DIRTY_FLAG);
9fa3e853
FB
2004}
2005
9fa3e853 2006/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 2007 tested for self modifying code */
c227f099 2008static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 2009 target_ulong vaddr)
9fa3e853 2010{
f7c11b53 2011 cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
1ccde1cb
FB
2012}
2013
5fafdf24 2014static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
2015 unsigned long start, unsigned long length)
2016{
2017 unsigned long addr;
84b7b8e7
FB
2018 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2019 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 2020 if ((addr - start) < length) {
0f459d16 2021 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
2022 }
2023 }
2024}
2025
5579c7f3 2026/* Note: start and end must be within the same ram block. */
c227f099 2027void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 2028 int dirty_flags)
1ccde1cb
FB
2029{
2030 CPUState *env;
4f2ac237 2031 unsigned long length, start1;
f7c11b53 2032 int i;
1ccde1cb
FB
2033
2034 start &= TARGET_PAGE_MASK;
2035 end = TARGET_PAGE_ALIGN(end);
2036
2037 length = end - start;
2038 if (length == 0)
2039 return;
f7c11b53 2040 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
f23db169 2041
1ccde1cb
FB
2042 /* we modify the TLB cache so that the dirty bit will be set again
2043 when accessing the range */
5579c7f3
PB
2044 start1 = (unsigned long)qemu_get_ram_ptr(start);
 2045    /* Check that we don't span multiple blocks - this breaks the
2046 address comparisons below. */
2047 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2048 != (end - 1) - start) {
2049 abort();
2050 }
2051
6a00d601 2052 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2053 int mmu_idx;
2054 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2055 for(i = 0; i < CPU_TLB_SIZE; i++)
2056 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2057 start1, length);
2058 }
6a00d601 2059 }
1ccde1cb
FB
2060}
2061
74576198
AL
2062int cpu_physical_memory_set_dirty_tracking(int enable)
2063{
f6f3fbca 2064 int ret = 0;
74576198 2065 in_migration = enable;
f6f3fbca
MT
2066 ret = cpu_notify_migration_log(!!enable);
2067 return ret;
74576198
AL
2068}
2069
2070int cpu_physical_memory_get_dirty_tracking(void)
2071{
2072 return in_migration;
2073}
2074
c227f099
AL
2075int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2076 target_phys_addr_t end_addr)
2bec46dc 2077{
7b8f3b78 2078 int ret;
151f7749 2079
f6f3fbca 2080 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2081 return ret;
2bec46dc
AL
2082}
2083
3a7d929e
FB
2084static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2085{
c227f099 2086 ram_addr_t ram_addr;
5579c7f3 2087 void *p;
3a7d929e 2088
84b7b8e7 2089 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2090 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2091 + tlb_entry->addend);
2092 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 2093 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2094 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2095 }
2096 }
2097}
2098
2099/* update the TLB according to the current state of the dirty bits */
2100void cpu_tlb_update_dirty(CPUState *env)
2101{
2102 int i;
cfde4bd9
IY
2103 int mmu_idx;
2104 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2105 for(i = 0; i < CPU_TLB_SIZE; i++)
2106 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2107 }
3a7d929e
FB
2108}
2109
0f459d16 2110static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2111{
0f459d16
PB
2112 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2113 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2114}
2115
0f459d16
PB
2116/* update the TLB corresponding to virtual page vaddr
2117 so that it is no longer dirty */
2118static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2119{
1ccde1cb 2120 int i;
cfde4bd9 2121 int mmu_idx;
1ccde1cb 2122
0f459d16 2123 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2124 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2125 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2126 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2127}
2128
d4c430a8
PB
2129/* Our TLB does not support large pages, so remember the area covered by
2130 large pages and trigger a full TLB flush if these are invalidated. */
2131static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2132 target_ulong size)
2133{
2134 target_ulong mask = ~(size - 1);
2135
2136 if (env->tlb_flush_addr == (target_ulong)-1) {
2137 env->tlb_flush_addr = vaddr & mask;
2138 env->tlb_flush_mask = mask;
2139 return;
2140 }
2141 /* Extend the existing region to include the new page.
2142 This is a compromise between unnecessary flushes and the cost
2143 of maintaining a full variable size TLB. */
2144 mask &= env->tlb_flush_mask;
2145 while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2146 mask <<= 1;
2147 }
2148 env->tlb_flush_addr &= mask;
2149 env->tlb_flush_mask = mask;
2150}
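
/* Worked example of the widening above (illustrative): with an existing
   4 MB region at 0x00400000 (tlb_flush_mask 0xffc00000) and a new page
   at 0x00800000, the XOR of the two addresses is 0x00c00000, so the
   mask is shifted left twice to 0xff000000.  The result is a single
   16 MB region at 0x00000000 instead of two separate ones. */
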
2151
2152/* Add a new TLB entry. At most one entry for a given virtual address
 2153   is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the
2154 supplied size is only used by tlb_flush_page. */
2155void tlb_set_page(CPUState *env, target_ulong vaddr,
2156 target_phys_addr_t paddr, int prot,
2157 int mmu_idx, target_ulong size)
9fa3e853 2158{
92e873b9 2159 PhysPageDesc *p;
4f2ac237 2160 unsigned long pd;
9fa3e853 2161 unsigned int index;
4f2ac237 2162 target_ulong address;
0f459d16 2163 target_ulong code_address;
355b1943 2164 unsigned long addend;
84b7b8e7 2165 CPUTLBEntry *te;
a1d1bb31 2166 CPUWatchpoint *wp;
c227f099 2167 target_phys_addr_t iotlb;
9fa3e853 2168
d4c430a8
PB
2169 assert(size >= TARGET_PAGE_SIZE);
2170 if (size != TARGET_PAGE_SIZE) {
2171 tlb_add_large_page(env, vaddr, size);
2172 }
92e873b9 2173 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2174 if (!p) {
2175 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2176 } else {
2177 pd = p->phys_offset;
9fa3e853
FB
2178 }
2179#if defined(DEBUG_TLB)
6ebbf390
JM
 2180    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d size=" TARGET_FMT_lx " pd=0x%08lx\n",
 2181           vaddr, (int)paddr, prot, mmu_idx, size, pd);
9fa3e853
FB
2182#endif
2183
0f459d16
PB
2184 address = vaddr;
2185 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2186 /* IO memory case (romd handled later) */
2187 address |= TLB_MMIO;
2188 }
5579c7f3 2189 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2190 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2191 /* Normal RAM. */
2192 iotlb = pd & TARGET_PAGE_MASK;
2193 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2194 iotlb |= IO_MEM_NOTDIRTY;
2195 else
2196 iotlb |= IO_MEM_ROM;
2197 } else {
ccbb4d44 2198 /* IO handlers are currently passed a physical address.
0f459d16
PB
2199 It would be nice to pass an offset from the base address
2200 of that region. This would avoid having to special case RAM,
2201 and avoid full address decoding in every device.
2202 We can't use the high bits of pd for this because
2203 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2204 iotlb = (pd & ~TARGET_PAGE_MASK);
2205 if (p) {
8da3ff18
PB
2206 iotlb += p->region_offset;
2207 } else {
2208 iotlb += paddr;
2209 }
0f459d16
PB
2210 }
2211
2212 code_address = address;
2213 /* Make accesses to pages with watchpoints go via the
2214 watchpoint trap routines. */
72cf2d4f 2215 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2216 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2217 iotlb = io_mem_watch + paddr;
2218 /* TODO: The memory case can be optimized by not trapping
2219 reads of pages with a write breakpoint. */
2220 address |= TLB_MMIO;
6658ffb8 2221 }
0f459d16 2222 }
d79acba4 2223
0f459d16
PB
2224 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2225 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2226 te = &env->tlb_table[mmu_idx][index];
2227 te->addend = addend - vaddr;
2228 if (prot & PAGE_READ) {
2229 te->addr_read = address;
2230 } else {
2231 te->addr_read = -1;
2232 }
5c751e99 2233
0f459d16
PB
2234 if (prot & PAGE_EXEC) {
2235 te->addr_code = code_address;
2236 } else {
2237 te->addr_code = -1;
2238 }
2239 if (prot & PAGE_WRITE) {
2240 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2241 (pd & IO_MEM_ROMD)) {
2242 /* Write access calls the I/O callback. */
2243 te->addr_write = address | TLB_MMIO;
2244 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2245 !cpu_physical_memory_is_dirty(pd)) {
2246 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2247 } else {
0f459d16 2248 te->addr_write = address;
9fa3e853 2249 }
0f459d16
PB
2250 } else {
2251 te->addr_write = -1;
9fa3e853 2252 }
9fa3e853
FB
2253}
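
/* Caller sketch (illustrative): a target's tlb_fill() resolves the fault
   address through its MMU model and then installs the mapping:

       tlb_set_page(env, vaddr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                    prot, mmu_idx, TARGET_PAGE_SIZE);

   Passing a size larger than TARGET_PAGE_SIZE (a target large page) still
   maps only one page here; it merely widens the region remembered by
   tlb_add_large_page() for later flushing. */
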
2254
0124311e
FB
2255#else
2256
ee8b7021 2257void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2258{
2259}
2260
2e12669a 2261void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2262{
2263}
2264
edf8e2af
MW
2265/*
2266 * Walks guest process memory "regions" one by one
2267 * and calls callback function 'fn' for each region.
2268 */
5cd2c5b6
RH
2269
2270struct walk_memory_regions_data
2271{
2272 walk_memory_regions_fn fn;
2273 void *priv;
2274 unsigned long start;
2275 int prot;
2276};
2277
2278static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2279 abi_ulong end, int new_prot)
5cd2c5b6
RH
2280{
2281 if (data->start != -1ul) {
2282 int rc = data->fn(data->priv, data->start, end, data->prot);
2283 if (rc != 0) {
2284 return rc;
2285 }
2286 }
2287
2288 data->start = (new_prot ? end : -1ul);
2289 data->prot = new_prot;
2290
2291 return 0;
2292}
2293
2294static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2295 abi_ulong base, int level, void **lp)
5cd2c5b6 2296{
b480d9b7 2297 abi_ulong pa;
5cd2c5b6
RH
2298 int i, rc;
2299
2300 if (*lp == NULL) {
2301 return walk_memory_regions_end(data, base, 0);
2302 }
2303
2304 if (level == 0) {
2305 PageDesc *pd = *lp;
7296abac 2306 for (i = 0; i < L2_SIZE; ++i) {
5cd2c5b6
RH
2307 int prot = pd[i].flags;
2308
2309 pa = base | (i << TARGET_PAGE_BITS);
2310 if (prot != data->prot) {
2311 rc = walk_memory_regions_end(data, pa, prot);
2312 if (rc != 0) {
2313 return rc;
9fa3e853 2314 }
9fa3e853 2315 }
5cd2c5b6
RH
2316 }
2317 } else {
2318 void **pp = *lp;
7296abac 2319 for (i = 0; i < L2_SIZE; ++i) {
b480d9b7
PB
2320 pa = base | ((abi_ulong)i <<
2321 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2322 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2323 if (rc != 0) {
2324 return rc;
2325 }
2326 }
2327 }
2328
2329 return 0;
2330}
2331
2332int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2333{
2334 struct walk_memory_regions_data data;
2335 unsigned long i;
2336
2337 data.fn = fn;
2338 data.priv = priv;
2339 data.start = -1ul;
2340 data.prot = 0;
2341
2342 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2343 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2344 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2345 if (rc != 0) {
2346 return rc;
9fa3e853 2347 }
33417e70 2348 }
5cd2c5b6
RH
2349
2350 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2351}
2352
b480d9b7
PB
2353static int dump_region(void *priv, abi_ulong start,
2354 abi_ulong end, unsigned long prot)
edf8e2af
MW
2355{
2356 FILE *f = (FILE *)priv;
2357
b480d9b7
PB
2358 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2359 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2360 start, end, end - start,
2361 ((prot & PAGE_READ) ? 'r' : '-'),
2362 ((prot & PAGE_WRITE) ? 'w' : '-'),
2363 ((prot & PAGE_EXEC) ? 'x' : '-'));
2364
2365 return (0);
2366}
2367
2368/* dump memory mappings */
2369void page_dump(FILE *f)
2370{
2371 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2372 "start", "end", "size", "prot");
2373 walk_memory_regions(f, dump_region);
33417e70
FB
2374}
2375
53a5960a 2376int page_get_flags(target_ulong address)
33417e70 2377{
9fa3e853
FB
2378 PageDesc *p;
2379
2380 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2381 if (!p)
9fa3e853
FB
2382 return 0;
2383 return p->flags;
2384}
2385
376a7909
RH
2386/* Modify the flags of a page and invalidate the code if necessary.
 2387   The flag PAGE_WRITE_ORG is set automatically depending
2388 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2389void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2390{
376a7909
RH
2391 target_ulong addr, len;
2392
2393 /* This function should never be called with addresses outside the
2394 guest address space. If this assert fires, it probably indicates
2395 a missing call to h2g_valid. */
b480d9b7
PB
2396#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2397 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2398#endif
2399 assert(start < end);
9fa3e853
FB
2400
2401 start = start & TARGET_PAGE_MASK;
2402 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2403
2404 if (flags & PAGE_WRITE) {
9fa3e853 2405 flags |= PAGE_WRITE_ORG;
376a7909
RH
2406 }
2407
2408 for (addr = start, len = end - start;
2409 len != 0;
2410 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2411 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2412
2413 /* If the write protection bit is set, then we invalidate
2414 the code inside. */
5fafdf24 2415 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2416 (flags & PAGE_WRITE) &&
2417 p->first_tb) {
d720b93d 2418 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2419 }
2420 p->flags = flags;
2421 }
33417e70
FB
2422}
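
/* Usage sketch (hypothetical helper): this is essentially what the
   user-mode target_mmap() path does after a successful host mmap().
   PROT_* come from <sys/mman.h>; mmap_lock must be held, as noted above. */
static void example_record_mapping(target_ulong start, target_ulong len,
                                   int host_prot)
{
    int flags = PAGE_VALID;
    if (host_prot & PROT_READ)
        flags |= PAGE_READ;
    if (host_prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (host_prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}
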
2423
3d97b40b
TS
2424int page_check_range(target_ulong start, target_ulong len, int flags)
2425{
2426 PageDesc *p;
2427 target_ulong end;
2428 target_ulong addr;
2429
376a7909
RH
2430 /* This function should never be called with addresses outside the
2431 guest address space. If this assert fires, it probably indicates
2432 a missing call to h2g_valid. */
338e9e6c
BS
2433#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2434 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2435#endif
2436
3e0650a9
RH
2437 if (len == 0) {
2438 return 0;
2439 }
376a7909
RH
2440 if (start + len - 1 < start) {
2441 /* We've wrapped around. */
55f280c9 2442 return -1;
376a7909 2443 }
55f280c9 2444
3d97b40b
TS
 2445    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2446 start = start & TARGET_PAGE_MASK;
2447
376a7909
RH
2448 for (addr = start, len = end - start;
2449 len != 0;
2450 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2451 p = page_find(addr >> TARGET_PAGE_BITS);
 2452        if (!p)
 2453            return -1;
 2454        if (!(p->flags & PAGE_VALID))
2455 return -1;
2456
dae3270c 2457 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2458 return -1;
dae3270c
FB
2459 if (flags & PAGE_WRITE) {
2460 if (!(p->flags & PAGE_WRITE_ORG))
2461 return -1;
2462 /* unprotect the page if it was put read-only because it
2463 contains translated code */
2464 if (!(p->flags & PAGE_WRITE)) {
2465 if (!page_unprotect(addr, 0, NULL))
2466 return -1;
2467 }
2468 return 0;
2469 }
3d97b40b
TS
2470 }
2471 return 0;
2472}
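
/* Sketch of the typical guest-pointer validation done by the syscall
   emulation before copying data; this mirrors access_ok() in qemu.h
   (VERIFY_READ/VERIFY_WRITE are defined there). */
static int example_access_ok(int type, target_ulong addr, target_ulong size)
{
    return page_check_range(addr, size,
                            (type == VERIFY_READ) ?
                            PAGE_READ : (PAGE_READ | PAGE_WRITE)) == 0;
}
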
2473
9fa3e853 2474/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2475 page. Return TRUE if the fault was successfully handled. */
53a5960a 2476int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853 2477{
45d679d6
AJ
2478 unsigned int prot;
2479 PageDesc *p;
53a5960a 2480 target_ulong host_start, host_end, addr;
9fa3e853 2481
c8a706fe
PB
2482 /* Technically this isn't safe inside a signal handler. However we
2483 know this only ever happens in a synchronous SEGV handler, so in
2484 practice it seems to be ok. */
2485 mmap_lock();
2486
45d679d6
AJ
2487 p = page_find(address >> TARGET_PAGE_BITS);
2488 if (!p) {
c8a706fe 2489 mmap_unlock();
9fa3e853 2490 return 0;
c8a706fe 2491 }
45d679d6 2492
9fa3e853
FB
2493 /* if the page was really writable, then we change its
2494 protection back to writable */
45d679d6
AJ
2495 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2496 host_start = address & qemu_host_page_mask;
2497 host_end = host_start + qemu_host_page_size;
2498
2499 prot = 0;
2500 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2501 p = page_find(addr >> TARGET_PAGE_BITS);
2502 p->flags |= PAGE_WRITE;
2503 prot |= p->flags;
2504
9fa3e853
FB
2505 /* and since the content will be modified, we must invalidate
2506 the corresponding translated code. */
45d679d6 2507 tb_invalidate_phys_page(addr, pc, puc);
9fa3e853 2508#ifdef DEBUG_TB_CHECK
45d679d6 2509 tb_invalidate_check(addr);
9fa3e853 2510#endif
9fa3e853 2511 }
45d679d6
AJ
2512 mprotect((void *)g2h(host_start), qemu_host_page_size,
2513 prot & PAGE_BITS);
2514
2515 mmap_unlock();
2516 return 1;
9fa3e853 2517 }
c8a706fe 2518 mmap_unlock();
9fa3e853
FB
2519 return 0;
2520}
2521
6a00d601
FB
2522static inline void tlb_set_dirty(CPUState *env,
2523 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2524{
2525}
9fa3e853
FB
2526#endif /* defined(CONFIG_USER_ONLY) */
2527
e2eef170 2528#if !defined(CONFIG_USER_ONLY)
8da3ff18 2529
c04b2b78
PB
2530#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2531typedef struct subpage_t {
2532 target_phys_addr_t base;
f6405247
RH
2533 ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2534 ram_addr_t region_offset[TARGET_PAGE_SIZE];
c04b2b78
PB
2535} subpage_t;
2536
c227f099
AL
2537static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2538 ram_addr_t memory, ram_addr_t region_offset);
f6405247
RH
2539static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2540 ram_addr_t orig_memory,
2541 ram_addr_t region_offset);
db7b5426
BS
2542#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2543 need_subpage) \
2544 do { \
2545 if (addr > start_addr) \
2546 start_addr2 = 0; \
2547 else { \
2548 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2549 if (start_addr2 > 0) \
2550 need_subpage = 1; \
2551 } \
2552 \
49e9fba2 2553 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2554 end_addr2 = TARGET_PAGE_SIZE - 1; \
2555 else { \
2556 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2557 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2558 need_subpage = 1; \
2559 } \
2560 } while (0)
2561
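
/* Worked example of CHECK_SUBPAGE (illustrative, 4 KB target pages):
   registering start_addr 0x1000800 with orig_size 0x400 against the page
   at addr 0x1000000 yields start_addr2 0x800 and end_addr2 0xbff, so
   need_subpage is set and only that slice of the page is remapped. */
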
8f2498f9
MT
2562/* register physical memory.
2563 For RAM, 'size' must be a multiple of the target page size.
2564 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2565 io memory page. The address used when calling the IO function is
2566 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2567 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2568 before calculating this offset. This should not be a problem unless
2569 the low bits of start_addr and region_offset differ. */
c227f099
AL
2570void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2571 ram_addr_t size,
2572 ram_addr_t phys_offset,
2573 ram_addr_t region_offset)
33417e70 2574{
c227f099 2575 target_phys_addr_t addr, end_addr;
92e873b9 2576 PhysPageDesc *p;
9d42037b 2577 CPUState *env;
c227f099 2578 ram_addr_t orig_size = size;
f6405247 2579 subpage_t *subpage;
33417e70 2580
f6f3fbca
MT
2581 cpu_notify_set_memory(start_addr, size, phys_offset);
2582
67c4d23c
PB
2583 if (phys_offset == IO_MEM_UNASSIGNED) {
2584 region_offset = start_addr;
2585 }
8da3ff18 2586 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2587 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2588 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2589 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2590 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2591 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2592 ram_addr_t orig_memory = p->phys_offset;
2593 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2594 int need_subpage = 0;
2595
2596 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2597 need_subpage);
f6405247 2598 if (need_subpage) {
db7b5426
BS
2599 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2600 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2601 &p->phys_offset, orig_memory,
2602 p->region_offset);
db7b5426
BS
2603 } else {
2604 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2605 >> IO_MEM_SHIFT];
2606 }
8da3ff18
PB
2607 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2608 region_offset);
2609 p->region_offset = 0;
db7b5426
BS
2610 } else {
2611 p->phys_offset = phys_offset;
2612 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2613 (phys_offset & IO_MEM_ROMD))
2614 phys_offset += TARGET_PAGE_SIZE;
2615 }
2616 } else {
2617 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2618 p->phys_offset = phys_offset;
8da3ff18 2619 p->region_offset = region_offset;
db7b5426 2620 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2621 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2622 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2623 } else {
c227f099 2624 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2625 int need_subpage = 0;
2626
2627 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2628 end_addr2, need_subpage);
2629
f6405247 2630 if (need_subpage) {
db7b5426 2631 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2632 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2633 addr & TARGET_PAGE_MASK);
db7b5426 2634 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2635 phys_offset, region_offset);
2636 p->region_offset = 0;
db7b5426
BS
2637 }
2638 }
2639 }
8da3ff18 2640 region_offset += TARGET_PAGE_SIZE;
33417e70 2641 }
3b46e624 2642
9d42037b
FB
2643 /* since each CPU stores ram addresses in its TLB cache, we must
2644 reset the modified entries */
2645 /* XXX: slow ! */
2646 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2647 tlb_flush(env, 1);
2648 }
33417e70
FB
2649}
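
/* Board-code sketch (illustrative): machine init code usually reaches
   this function through the cpu_register_physical_memory() wrapper in
   cpu-common.h, which passes region_offset == 0. */
static void example_board_ram_init(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);   /* defined below */
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
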
2650
ba863458 2651/* XXX: temporary until new memory mapping API */
c227f099 2652ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2653{
2654 PhysPageDesc *p;
2655
2656 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2657 if (!p)
2658 return IO_MEM_UNASSIGNED;
2659 return p->phys_offset;
2660}
2661
c227f099 2662void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2663{
2664 if (kvm_enabled())
2665 kvm_coalesce_mmio_region(addr, size);
2666}
2667
c227f099 2668void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2669{
2670 if (kvm_enabled())
2671 kvm_uncoalesce_mmio_region(addr, size);
2672}
2673
62a2744c
SY
2674void qemu_flush_coalesced_mmio_buffer(void)
2675{
2676 if (kvm_enabled())
2677 kvm_flush_coalesced_mmio_buffer();
2678}
2679
c902760f
MT
2680#if defined(__linux__) && !defined(TARGET_S390X)
2681
2682#include <sys/vfs.h>
2683
2684#define HUGETLBFS_MAGIC 0x958458f6
2685
2686static long gethugepagesize(const char *path)
2687{
2688 struct statfs fs;
2689 int ret;
2690
2691 do {
2692 ret = statfs(path, &fs);
2693 } while (ret != 0 && errno == EINTR);
2694
2695 if (ret != 0) {
6adc0549 2696 perror(path);
c902760f
MT
2697 return 0;
2698 }
2699
2700 if (fs.f_type != HUGETLBFS_MAGIC)
2701 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2702
2703 return fs.f_bsize;
2704}
2705
2706static void *file_ram_alloc(ram_addr_t memory, const char *path)
2707{
2708 char *filename;
2709 void *area;
2710 int fd;
2711#ifdef MAP_POPULATE
2712 int flags;
2713#endif
2714 unsigned long hpagesize;
2715
2716 hpagesize = gethugepagesize(path);
2717 if (!hpagesize) {
2718 return NULL;
2719 }
2720
2721 if (memory < hpagesize) {
2722 return NULL;
2723 }
2724
2725 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2726 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2727 return NULL;
2728 }
2729
2730 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2731 return NULL;
2732 }
2733
2734 fd = mkstemp(filename);
2735 if (fd < 0) {
6adc0549 2736 perror("unable to create backing store for hugepages");
c902760f
MT
2737 free(filename);
2738 return NULL;
2739 }
2740 unlink(filename);
2741 free(filename);
2742
2743 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2744
2745 /*
2746 * ftruncate is not supported by hugetlbfs in older
2747 * hosts, so don't bother bailing out on errors.
2748 * If anything goes wrong with it under other filesystems,
2749 * mmap will fail.
2750 */
2751 if (ftruncate(fd, memory))
2752 perror("ftruncate");
2753
2754#ifdef MAP_POPULATE
2755 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2756 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2757 * to sidestep this quirk.
2758 */
2759 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2760 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2761#else
2762 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2763#endif
2764 if (area == MAP_FAILED) {
2765 perror("file_ram_alloc: can't mmap RAM pages");
2766 close(fd);
2767 return (NULL);
2768 }
2769 return area;
2770}
2771#endif
2772
c227f099 2773ram_addr_t qemu_ram_alloc(ram_addr_t size)
94a6b54f
PB
2774{
2775 RAMBlock *new_block;
2776
94a6b54f
PB
2777 size = TARGET_PAGE_ALIGN(size);
2778 new_block = qemu_malloc(sizeof(*new_block));
2779
c902760f
MT
2780 if (mem_path) {
2781#if defined (__linux__) && !defined(TARGET_S390X)
2782 new_block->host = file_ram_alloc(size, mem_path);
618a568d
MT
2783 if (!new_block->host) {
2784 new_block->host = qemu_vmalloc(size);
2785#ifdef MADV_MERGEABLE
2786 madvise(new_block->host, size, MADV_MERGEABLE);
2787#endif
2788 }
c902760f
MT
2789#else
2790 fprintf(stderr, "-mem-path option unsupported\n");
2791 exit(1);
2792#endif
2793 } else {
6b02494d 2794#if defined(TARGET_S390X) && defined(CONFIG_KVM)
c902760f
MT
2795 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2796 new_block->host = mmap((void*)0x1000000, size,
2797 PROT_EXEC|PROT_READ|PROT_WRITE,
2798 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2799#else
c902760f 2800 new_block->host = qemu_vmalloc(size);
6b02494d 2801#endif
ccb167e9 2802#ifdef MADV_MERGEABLE
c902760f 2803 madvise(new_block->host, size, MADV_MERGEABLE);
ccb167e9 2804#endif
c902760f 2805 }
94a6b54f
PB
2806 new_block->offset = last_ram_offset;
2807 new_block->length = size;
2808
2809 new_block->next = ram_blocks;
2810 ram_blocks = new_block;
2811
2812 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2813 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2814 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2815 0xff, size >> TARGET_PAGE_BITS);
2816
2817 last_ram_offset += size;
2818
6f0437e8
JK
2819 if (kvm_enabled())
2820 kvm_setup_guest_memory(new_block->host, size);
2821
94a6b54f
PB
2822 return new_block->offset;
2823}
e9a1ab19 2824
c227f099 2825void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2826{
94a6b54f 2827 /* TODO: implement this. */
e9a1ab19
FB
2828}
2829
dc828ca1 2830/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2831 With the exception of the softmmu code in this file, this should
2832 only be used for local memory (e.g. video ram) that the device owns,
2833 and knows it isn't going to access beyond the end of the block.
2834
2835 It should not be used for general purpose DMA.
2836 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2837 */
c227f099 2838void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2839{
94a6b54f
PB
2840 RAMBlock *prev;
2841 RAMBlock **prevp;
2842 RAMBlock *block;
2843
94a6b54f
PB
2844 prev = NULL;
2845 prevp = &ram_blocks;
2846 block = ram_blocks;
2847 while (block && (block->offset > addr
2848 || block->offset + block->length <= addr)) {
2849 if (prev)
2850 prevp = &prev->next;
2851 prev = block;
2852 block = block->next;
2853 }
2854 if (!block) {
2855 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2856 abort();
2857 }
 2858    /* Move this entry to the start of the list. */
2859 if (prev) {
2860 prev->next = block->next;
2861 block->next = *prevp;
2862 *prevp = block;
2863 }
2864 return block->host + (addr - block->offset);
dc828ca1
PB
2865}
2866
5579c7f3
PB
2867/* Some of the softmmu routines need to translate from a host pointer
2868 (typically a TLB entry) back to a ram offset. */
c227f099 2869ram_addr_t qemu_ram_addr_from_host(void *ptr)
5579c7f3 2870{
94a6b54f
PB
2871 RAMBlock *block;
2872 uint8_t *host = ptr;
2873
94a6b54f
PB
2874 block = ram_blocks;
2875 while (block && (block->host > host
2876 || block->host + block->length <= host)) {
94a6b54f
PB
2877 block = block->next;
2878 }
2879 if (!block) {
2880 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2881 abort();
2882 }
2883 return block->offset + (host - block->host);
5579c7f3
PB
2884}
2885
c227f099 2886static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2887{
67d3b957 2888#ifdef DEBUG_UNASSIGNED
ab3d1727 2889 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2890#endif
faed1c2a 2891#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2892 do_unassigned_access(addr, 0, 0, 0, 1);
2893#endif
2894 return 0;
2895}
2896
c227f099 2897static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2898{
2899#ifdef DEBUG_UNASSIGNED
2900 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2901#endif
faed1c2a 2902#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2903 do_unassigned_access(addr, 0, 0, 0, 2);
2904#endif
2905 return 0;
2906}
2907
c227f099 2908static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2909{
2910#ifdef DEBUG_UNASSIGNED
2911 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2912#endif
faed1c2a 2913#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2914 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2915#endif
33417e70
FB
2916 return 0;
2917}
2918
c227f099 2919static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2920{
67d3b957 2921#ifdef DEBUG_UNASSIGNED
ab3d1727 2922 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2923#endif
faed1c2a 2924#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2925 do_unassigned_access(addr, 1, 0, 0, 1);
2926#endif
2927}
2928
c227f099 2929static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2930{
2931#ifdef DEBUG_UNASSIGNED
2932 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2933#endif
faed1c2a 2934#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2935 do_unassigned_access(addr, 1, 0, 0, 2);
2936#endif
2937}
2938
c227f099 2939static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2940{
2941#ifdef DEBUG_UNASSIGNED
2942 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2943#endif
faed1c2a 2944#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2945 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2946#endif
33417e70
FB
2947}
2948
d60efc6b 2949static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 2950 unassigned_mem_readb,
e18231a3
BS
2951 unassigned_mem_readw,
2952 unassigned_mem_readl,
33417e70
FB
2953};
2954
d60efc6b 2955static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 2956 unassigned_mem_writeb,
e18231a3
BS
2957 unassigned_mem_writew,
2958 unassigned_mem_writel,
33417e70
FB
2959};
2960
c227f099 2961static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2962 uint32_t val)
9fa3e853 2963{
3a7d929e 2964 int dirty_flags;
f7c11b53 2965 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2966 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2967#if !defined(CONFIG_USER_ONLY)
3a7d929e 2968 tb_invalidate_phys_page_fast(ram_addr, 1);
f7c11b53 2969 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2970#endif
3a7d929e 2971 }
5579c7f3 2972 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 2973 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2974 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2975 /* we remove the notdirty callback only if the code has been
2976 flushed */
2977 if (dirty_flags == 0xff)
2e70f6ef 2978 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2979}
2980
c227f099 2981static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2982 uint32_t val)
9fa3e853 2983{
3a7d929e 2984 int dirty_flags;
f7c11b53 2985 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 2986 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2987#if !defined(CONFIG_USER_ONLY)
3a7d929e 2988 tb_invalidate_phys_page_fast(ram_addr, 2);
f7c11b53 2989 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 2990#endif
3a7d929e 2991 }
5579c7f3 2992 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 2993 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 2994 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
2995 /* we remove the notdirty callback only if the code has been
2996 flushed */
2997 if (dirty_flags == 0xff)
2e70f6ef 2998 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2999}
3000
c227f099 3001static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 3002 uint32_t val)
9fa3e853 3003{
3a7d929e 3004 int dirty_flags;
f7c11b53 3005 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3a7d929e 3006 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 3007#if !defined(CONFIG_USER_ONLY)
3a7d929e 3008 tb_invalidate_phys_page_fast(ram_addr, 4);
f7c11b53 3009 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
9fa3e853 3010#endif
3a7d929e 3011 }
5579c7f3 3012 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169 3013 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
f7c11b53 3014 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
f23db169
FB
3015 /* we remove the notdirty callback only if the code has been
3016 flushed */
3017 if (dirty_flags == 0xff)
2e70f6ef 3018 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
3019}
3020
d60efc6b 3021static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
3022 NULL, /* never used */
3023 NULL, /* never used */
3024 NULL, /* never used */
3025};
3026
d60efc6b 3027static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
3028 notdirty_mem_writeb,
3029 notdirty_mem_writew,
3030 notdirty_mem_writel,
3031};
3032
0f459d16 3033/* Generate a debug exception if a watchpoint has been hit. */
b4051334 3034static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
3035{
3036 CPUState *env = cpu_single_env;
06d55cc1
AL
3037 target_ulong pc, cs_base;
3038 TranslationBlock *tb;
0f459d16 3039 target_ulong vaddr;
a1d1bb31 3040 CPUWatchpoint *wp;
06d55cc1 3041 int cpu_flags;
0f459d16 3042
06d55cc1
AL
3043 if (env->watchpoint_hit) {
3044 /* We re-entered the check after replacing the TB. Now raise
 3045         * the debug interrupt so that it will trigger after the
3046 * current instruction. */
3047 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3048 return;
3049 }
2e70f6ef 3050 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 3051 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
3052 if ((vaddr == (wp->vaddr & len_mask) ||
3053 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
3054 wp->flags |= BP_WATCHPOINT_HIT;
3055 if (!env->watchpoint_hit) {
3056 env->watchpoint_hit = wp;
3057 tb = tb_find_pc(env->mem_io_pc);
3058 if (!tb) {
3059 cpu_abort(env, "check_watchpoint: could not find TB for "
3060 "pc=%p", (void *)env->mem_io_pc);
3061 }
3062 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3063 tb_phys_invalidate(tb, -1);
3064 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3065 env->exception_index = EXCP_DEBUG;
3066 } else {
3067 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3068 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3069 }
3070 cpu_resume_from_signal(env, NULL);
06d55cc1 3071 }
6e140f28
AL
3072 } else {
3073 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3074 }
3075 }
3076}
3077
6658ffb8
PB
3078/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3079 so these check for a hit then pass through to the normal out-of-line
3080 phys routines. */
c227f099 3081static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3082{
b4051334 3083 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3084 return ldub_phys(addr);
3085}
3086
c227f099 3087static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3088{
b4051334 3089 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3090 return lduw_phys(addr);
3091}
3092
c227f099 3093static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3094{
b4051334 3095 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3096 return ldl_phys(addr);
3097}
3098
c227f099 3099static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3100 uint32_t val)
3101{
b4051334 3102 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3103 stb_phys(addr, val);
3104}
3105
c227f099 3106static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3107 uint32_t val)
3108{
b4051334 3109 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3110 stw_phys(addr, val);
3111}
3112
c227f099 3113static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3114 uint32_t val)
3115{
b4051334 3116 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3117 stl_phys(addr, val);
3118}
3119
d60efc6b 3120static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3121 watch_mem_readb,
3122 watch_mem_readw,
3123 watch_mem_readl,
3124};
3125
d60efc6b 3126static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3127 watch_mem_writeb,
3128 watch_mem_writew,
3129 watch_mem_writel,
3130};
6658ffb8 3131
f6405247
RH
3132static inline uint32_t subpage_readlen (subpage_t *mmio,
3133 target_phys_addr_t addr,
3134 unsigned int len)
db7b5426 3135{
f6405247 3136 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426
BS
3137#if defined(DEBUG_SUBPAGE)
3138 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3139 mmio, len, addr, idx);
3140#endif
db7b5426 3141
f6405247
RH
3142 addr += mmio->region_offset[idx];
3143 idx = mmio->sub_io_index[idx];
3144 return io_mem_read[idx][len](io_mem_opaque[idx], addr);
db7b5426
BS
3145}
3146
c227f099 3147static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
f6405247 3148 uint32_t value, unsigned int len)
db7b5426 3149{
f6405247 3150 unsigned int idx = SUBPAGE_IDX(addr);
db7b5426 3151#if defined(DEBUG_SUBPAGE)
f6405247
RH
3152 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3153 __func__, mmio, len, addr, idx, value);
db7b5426 3154#endif
f6405247
RH
3155
3156 addr += mmio->region_offset[idx];
3157 idx = mmio->sub_io_index[idx];
3158 io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
db7b5426
BS
3159}
3160
c227f099 3161static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426 3162{
db7b5426
BS
3163 return subpage_readlen(opaque, addr, 0);
3164}
3165
c227f099 3166static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3167 uint32_t value)
3168{
db7b5426
BS
3169 subpage_writelen(opaque, addr, value, 0);
3170}
3171
c227f099 3172static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426 3173{
db7b5426
BS
3174 return subpage_readlen(opaque, addr, 1);
3175}
3176
c227f099 3177static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3178 uint32_t value)
3179{
db7b5426
BS
3180 subpage_writelen(opaque, addr, value, 1);
3181}
3182
c227f099 3183static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426 3184{
db7b5426
BS
3185 return subpage_readlen(opaque, addr, 2);
3186}
3187
f6405247
RH
3188static void subpage_writel (void *opaque, target_phys_addr_t addr,
3189 uint32_t value)
db7b5426 3190{
db7b5426
BS
3191 subpage_writelen(opaque, addr, value, 2);
3192}
3193
d60efc6b 3194static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3195 &subpage_readb,
3196 &subpage_readw,
3197 &subpage_readl,
3198};
3199
d60efc6b 3200static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3201 &subpage_writeb,
3202 &subpage_writew,
3203 &subpage_writel,
3204};
3205
c227f099
AL
3206static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3207 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3208{
3209 int idx, eidx;
3210
3211 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3212 return -1;
3213 idx = SUBPAGE_IDX(start);
3214 eidx = SUBPAGE_IDX(end);
3215#if defined(DEBUG_SUBPAGE)
0bf9e31a 3216 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3217 mmio, start, end, idx, eidx, memory);
3218#endif
f6405247 3219 memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
db7b5426 3220 for (; idx <= eidx; idx++) {
f6405247
RH
3221 mmio->sub_io_index[idx] = memory;
3222 mmio->region_offset[idx] = region_offset;
db7b5426
BS
3223 }
3224
3225 return 0;
3226}
3227
f6405247
RH
3228static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3229 ram_addr_t orig_memory,
3230 ram_addr_t region_offset)
db7b5426 3231{
c227f099 3232 subpage_t *mmio;
db7b5426
BS
3233 int subpage_memory;
3234
c227f099 3235 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3236
3237 mmio->base = base;
1eed09cb 3238 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3239#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3240 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3241 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3242#endif
1eec614b 3243 *phys = subpage_memory | IO_MEM_SUBPAGE;
f6405247 3244 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
db7b5426
BS
3245
3246 return mmio;
3247}
3248
88715657
AL
3249static int get_free_io_mem_idx(void)
3250{
3251 int i;
3252
3253 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3254 if (!io_mem_used[i]) {
3255 io_mem_used[i] = 1;
3256 return i;
3257 }
c6703b47 3258    fprintf(stderr, "Ran out of io_mem_idx entries, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3259 return -1;
3260}
3261
33417e70
FB
3262/* mem_read and mem_write are arrays of functions containing the
3263 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3264 2). Functions can be omitted with a NULL function pointer.
3ee89922 3265 If io_index is non zero, the corresponding io zone is
4254fab8
BS
3266 modified. If it is zero, a new io zone is allocated. The return
3267 value can be used with cpu_register_physical_memory(). (-1) is
3268 returned if error. */
1eed09cb 3269static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3270 CPUReadMemoryFunc * const *mem_read,
3271 CPUWriteMemoryFunc * const *mem_write,
1eed09cb 3272 void *opaque)
33417e70 3273{
3cab721d
RH
3274 int i;
3275
33417e70 3276 if (io_index <= 0) {
88715657
AL
3277 io_index = get_free_io_mem_idx();
3278 if (io_index == -1)
3279 return io_index;
33417e70 3280 } else {
1eed09cb 3281 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3282 if (io_index >= IO_MEM_NB_ENTRIES)
3283 return -1;
3284 }
b5ff1b31 3285
3cab721d
RH
3286 for (i = 0; i < 3; ++i) {
3287 io_mem_read[io_index][i]
3288 = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
3289 }
3290 for (i = 0; i < 3; ++i) {
3291 io_mem_write[io_index][i]
3292 = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
3293 }
a4193c8a 3294 io_mem_opaque[io_index] = opaque;
f6405247
RH
3295
3296 return (io_index << IO_MEM_SHIFT);
33417e70 3297}
61382a50 3298
d60efc6b
BS
3299int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3300 CPUWriteMemoryFunc * const *mem_write,
1eed09cb
AK
3301 void *opaque)
3302{
3303 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3304}
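
/* Device sketch (illustrative only): a trivial MMIO register bank.
   NULL slots fall back to the unassigned_mem_* handlers, as implemented
   in cpu_register_io_memory_fixed() above. */
typedef struct ExampleDev {
    uint32_t reg;
} ExampleDev;

static uint32_t example_mmio_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;
    return d->reg;                     /* every word aliases one register */
}

static void example_mmio_writel(void *opaque, target_phys_addr_t addr,
                                uint32_t val)
{
    ExampleDev *d = opaque;
    d->reg = val;
}

static CPUReadMemoryFunc * const example_mmio_read[3] = {
    NULL, NULL, example_mmio_readl,
};

static CPUWriteMemoryFunc * const example_mmio_write[3] = {
    NULL, NULL, example_mmio_writel,
};

/* The returned index is then mapped over the device's address range:
       int io = cpu_register_io_memory(example_mmio_read,
                                       example_mmio_write, &dev);
       cpu_register_physical_memory(base, 0x1000, io);             */
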
3305
88715657
AL
3306void cpu_unregister_io_memory(int io_table_address)
3307{
3308 int i;
3309 int io_index = io_table_address >> IO_MEM_SHIFT;
3310
3311 for (i=0;i < 3; i++) {
3312 io_mem_read[io_index][i] = unassigned_mem_read[i];
3313 io_mem_write[io_index][i] = unassigned_mem_write[i];
3314 }
3315 io_mem_opaque[io_index] = NULL;
3316 io_mem_used[io_index] = 0;
3317}
3318
e9179ce1
AK
3319static void io_mem_init(void)
3320{
3321 int i;
3322
3323 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3324 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3325 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3326 for (i=0; i<5; i++)
3327 io_mem_used[i] = 1;
3328
3329 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3330 watch_mem_write, NULL);
e9179ce1
AK
3331}
3332
e2eef170
PB
3333#endif /* !defined(CONFIG_USER_ONLY) */
3334
13eb76e0
FB
3335/* physical memory access (slow version, mainly for debug) */
3336#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3337int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3338 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3339{
3340 int l, flags;
3341 target_ulong page;
53a5960a 3342 void * p;
13eb76e0
FB
3343
3344 while (len > 0) {
3345 page = addr & TARGET_PAGE_MASK;
3346 l = (page + TARGET_PAGE_SIZE) - addr;
3347 if (l > len)
3348 l = len;
3349 flags = page_get_flags(page);
3350 if (!(flags & PAGE_VALID))
a68fe89c 3351 return -1;
13eb76e0
FB
3352 if (is_write) {
3353 if (!(flags & PAGE_WRITE))
a68fe89c 3354 return -1;
579a97f7 3355 /* XXX: this code should not depend on lock_user */
72fb7daa 3356 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3357 return -1;
72fb7daa
AJ
3358 memcpy(p, buf, l);
3359 unlock_user(p, addr, l);
13eb76e0
FB
3360 } else {
3361 if (!(flags & PAGE_READ))
a68fe89c 3362 return -1;
579a97f7 3363 /* XXX: this code should not depend on lock_user */
72fb7daa 3364 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3365 return -1;
72fb7daa 3366 memcpy(buf, p, l);
5b257578 3367 unlock_user(p, addr, 0);
13eb76e0
FB
3368 }
3369 len -= l;
3370 buf += l;
3371 addr += l;
3372 }
a68fe89c 3373 return 0;
13eb76e0 3374}
8df1cd07 3375
13eb76e0 3376#else
c227f099 3377void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3378 int len, int is_write)
3379{
3380 int l, io_index;
3381 uint8_t *ptr;
3382 uint32_t val;
c227f099 3383 target_phys_addr_t page;
2e12669a 3384 unsigned long pd;
92e873b9 3385 PhysPageDesc *p;
3b46e624 3386
13eb76e0
FB
3387 while (len > 0) {
3388 page = addr & TARGET_PAGE_MASK;
3389 l = (page + TARGET_PAGE_SIZE) - addr;
3390 if (l > len)
3391 l = len;
92e873b9 3392 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3393 if (!p) {
3394 pd = IO_MEM_UNASSIGNED;
3395 } else {
3396 pd = p->phys_offset;
3397 }
3b46e624 3398
13eb76e0 3399 if (is_write) {
3a7d929e 3400 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3401 target_phys_addr_t addr1 = addr;
13eb76e0 3402 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3403 if (p)
6c2934db 3404 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3405 /* XXX: could force cpu_single_env to NULL to avoid
3406 potential bugs */
6c2934db 3407 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3408 /* 32 bit write access */
c27004ec 3409 val = ldl_p(buf);
6c2934db 3410 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3411 l = 4;
6c2934db 3412 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3413 /* 16 bit write access */
c27004ec 3414 val = lduw_p(buf);
6c2934db 3415 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3416 l = 2;
3417 } else {
1c213d19 3418 /* 8 bit write access */
c27004ec 3419 val = ldub_p(buf);
6c2934db 3420 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3421 l = 1;
3422 }
3423 } else {
b448f2f3
FB
3424 unsigned long addr1;
3425 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3426 /* RAM case */
5579c7f3 3427 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3428 memcpy(ptr, buf, l);
3a7d929e
FB
3429 if (!cpu_physical_memory_is_dirty(addr1)) {
3430 /* invalidate code */
3431 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3432 /* set dirty bit */
f7c11b53
YT
3433 cpu_physical_memory_set_dirty_flags(
3434 addr1, (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3435 }
13eb76e0
FB
3436 }
3437 } else {
5fafdf24 3438 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3439 !(pd & IO_MEM_ROMD)) {
c227f099 3440 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3441 /* I/O case */
3442 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3443 if (p)
6c2934db
AJ
3444 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3445 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3446 /* 32 bit read access */
6c2934db 3447 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3448 stl_p(buf, val);
13eb76e0 3449 l = 4;
6c2934db 3450 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3451 /* 16 bit read access */
6c2934db 3452 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3453 stw_p(buf, val);
13eb76e0 3454                    l = 2;
3455 } else {
1c213d19 3456 /* 8 bit read access */
6c2934db 3457 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3458 stb_p(buf, val);
13eb76e0 3459                    l = 1;
3460 }
3461 } else {
3462 /* RAM case */
5579c7f3 3463 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0 3464                    (addr & ~TARGET_PAGE_MASK);
3465 memcpy(buf, ptr, l);
3466 }
3467 }
3468 len -= l;
3469 buf += l;
3470 addr += l;
3471 }
3472}
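/* Illustrative sketch, not part of the original file: a DMA-style round
   trip through the dispatcher above. The guest-physical address 0x1000
   and the helper name are hypothetical; kept under #if 0 so the listing
   still compiles unchanged. */
#if 0
static void example_dma_roundtrip(void)
{
    uint8_t out[4] = { 0x12, 0x34, 0x56, 0x78 };
    uint8_t in[4];

    cpu_physical_memory_rw(0x1000, out, sizeof(out), 1); /* host -> guest */
    cpu_physical_memory_rw(0x1000, in, sizeof(in), 0);   /* guest -> host */
}
#endif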
8df1cd07 3473
d0ecd2aa 3474/* used for ROM loading: can write in RAM and ROM */
c227f099 3475void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa 3476                                   const uint8_t *buf, int len)
3477{
3478 int l;
3479 uint8_t *ptr;
c227f099 3480 target_phys_addr_t page;
d0ecd2aa 3481    unsigned long pd;
3482 PhysPageDesc *p;
3b46e624 3483
d0ecd2aa 3484    while (len > 0) {
3485 page = addr & TARGET_PAGE_MASK;
3486 l = (page + TARGET_PAGE_SIZE) - addr;
3487 if (l > len)
3488 l = len;
3489 p = phys_page_find(page >> TARGET_PAGE_BITS);
3490 if (!p) {
3491 pd = IO_MEM_UNASSIGNED;
3492 } else {
3493 pd = p->phys_offset;
3494 }
3b46e624 3495
d0ecd2aa 3496 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3 3497            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3498 !(pd & IO_MEM_ROMD)) {
d0ecd2aa 3499            /* do nothing */
3500 } else {
3501 unsigned long addr1;
3502 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3503 /* ROM/RAM case */
5579c7f3 3504 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa 3505            memcpy(ptr, buf, l);
3506 }
3507 len -= l;
3508 buf += l;
3509 addr += l;
3510 }
3511}
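/* Illustrative sketch, not part of the original file: a firmware loader
   using the helper above, which writes through to ROM/ROMD pages that
   cpu_physical_memory_rw() would silently skip. The load address and
   function name are hypothetical. */
#if 0
static void example_install_firmware(const uint8_t *image, int size)
{
    cpu_physical_memory_write_rom(0xfffc0000, image, size);
}
#endif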
3512
6d16c2f8 3513typedef struct {
3514 void *buffer;
c227f099 3515    target_phys_addr_t addr;
3516 target_phys_addr_t len;
6d16c2f8 3517} BounceBuffer;
3518
3519static BounceBuffer bounce;
3520
ba223c29 3521typedef struct MapClient {
3522 void *opaque;
3523 void (*callback)(void *opaque);
72cf2d4f 3524 QLIST_ENTRY(MapClient) link;
ba223c29 3525} MapClient;
3526
72cf2d4f 3527static QLIST_HEAD(map_client_list, MapClient) map_client_list
3528 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29 3529
3530void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3531{
3532 MapClient *client = qemu_malloc(sizeof(*client));
3533
3534 client->opaque = opaque;
3535 client->callback = callback;
72cf2d4f 3536 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29 3537    return client;
3538}
3539
3540void cpu_unregister_map_client(void *_client)
3541{
3542 MapClient *client = (MapClient *)_client;
3543
72cf2d4f 3544 QLIST_REMOVE(client, link);
34d5e948 3545 qemu_free(client);
ba223c29 3546}
3547
3548static void cpu_notify_map_clients(void)
3549{
3550 MapClient *client;
3551
72cf2d4f 3552    while (!QLIST_EMPTY(&map_client_list)) {
3553 client = QLIST_FIRST(&map_client_list);
ba223c29 3554 client->callback(client->opaque);
34d5e948 3555 cpu_unregister_map_client(client);
ba223c29 3556    }
3557}
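/* Illustrative sketch, not part of the original file: a device that
   could not map its buffer registers a client and retries once the
   bounce buffer frees up. The device type and restart function are
   hypothetical; note that cpu_notify_map_clients() unregisters each
   client after invoking it, so a failed retry must re-register. */
#if 0
typedef struct ExampleDev ExampleDev;
static void example_dev_restart_dma(ExampleDev *dev); /* hypothetical */

static void example_retry_map(void *opaque)
{
    example_dev_restart_dma(opaque);
}

static void example_defer_dma(ExampleDev *dev)
{
    cpu_register_map_client(dev, example_retry_map);
}
#endif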
3558
6d16c2f8 3559/* Map a physical memory region into a host virtual address.
3560 * May map a subset of the requested range, given by and returned in *plen.
3561 * May return NULL if resources needed to perform the mapping are exhausted.
3562 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29 3563 * Use cpu_register_map_client() to know when retrying the map operation is
3564 * likely to succeed.
6d16c2f8 3565 */
c227f099 3566void *cpu_physical_memory_map(target_phys_addr_t addr,
3567 target_phys_addr_t *plen,
6d16c2f8 3568                              int is_write)
3569{
c227f099 3570    target_phys_addr_t len = *plen;
3571 target_phys_addr_t done = 0;
6d16c2f8 3572    int l;
3573 uint8_t *ret = NULL;
3574 uint8_t *ptr;
c227f099 3575 target_phys_addr_t page;
6d16c2f8 3576    unsigned long pd;
3577 PhysPageDesc *p;
3578 unsigned long addr1;
3579
3580 while (len > 0) {
3581 page = addr & TARGET_PAGE_MASK;
3582 l = (page + TARGET_PAGE_SIZE) - addr;
3583 if (l > len)
3584 l = len;
3585 p = phys_page_find(page >> TARGET_PAGE_BITS);
3586 if (!p) {
3587 pd = IO_MEM_UNASSIGNED;
3588 } else {
3589 pd = p->phys_offset;
3590 }
3591
3592 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3593 if (done || bounce.buffer) {
3594 break;
3595 }
3596 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3597 bounce.addr = addr;
3598 bounce.len = l;
3599 if (!is_write) {
3600 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3601 }
3602 ptr = bounce.buffer;
3603 } else {
3604 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3605 ptr = qemu_get_ram_ptr(addr1);
6d16c2f8 3606        }
3607 if (!done) {
3608 ret = ptr;
3609 } else if (ret + done != ptr) {
3610 break;
3611 }
3612
3613 len -= l;
3614 addr += l;
3615 done += l;
3616 }
3617 *plen = done;
3618 return ret;
3619}
3620
3621/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3622 * Will also mark the memory as dirty if is_write == 1. access_len gives
3623 * the amount of memory that was actually read or written by the caller.
3624 */
c227f099 3625void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3626 int is_write, target_phys_addr_t access_len)
6d16c2f8 3627{
3628 if (buffer != bounce.buffer) {
3629 if (is_write) {
c227f099 3630 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
6d16c2f8 3631            while (access_len) {
3632 unsigned l;
3633 l = TARGET_PAGE_SIZE;
3634 if (l > access_len)
3635 l = access_len;
3636 if (!cpu_physical_memory_is_dirty(addr1)) {
3637 /* invalidate code */
3638 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3639 /* set dirty bit */
f7c11b53 3640                    cpu_physical_memory_set_dirty_flags(
3641 addr1, (0xff & ~CODE_DIRTY_FLAG));
6d16c2f8 3642                }
3643 addr1 += l;
3644 access_len -= l;
3645 }
3646 }
3647 return;
3648 }
3649 if (is_write) {
3650 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3651 }
f8a83245 3652 qemu_vfree(bounce.buffer);
6d16c2f8 3653 bounce.buffer = NULL;
ba223c29 3654 cpu_notify_map_clients();
6d16c2f8 3655}
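/* Illustrative sketch, not part of the original file: the intended
   map -> access -> unmap pattern for the two functions above. The
   helper name is hypothetical; *plen may come back shorter than
   requested, so the caller loops. */
#if 0
static void example_zero_fill(target_phys_addr_t addr, target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *buf = cpu_physical_memory_map(addr, &plen, 1);
        if (!buf) {
            /* Mapping resources exhausted (e.g. bounce buffer busy);
               a real caller would defer via cpu_register_map_client(). */
            return;
        }
        memset(buf, 0, plen);
        /* Passing access_len == plen marks the RAM dirty and
           invalidates any TBs translated from it. */
        cpu_physical_memory_unmap(buf, plen, 1, plen);
        addr += plen;
        len -= plen;
    }
}
#endif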
d0ecd2aa 3656
8df1cd07 3657/* warning: addr must be aligned */
c227f099 3658uint32_t ldl_phys(target_phys_addr_t addr)
8df1cd07 3659{
3660 int io_index;
3661 uint8_t *ptr;
3662 uint32_t val;
3663 unsigned long pd;
3664 PhysPageDesc *p;
3665
3666 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3667 if (!p) {
3668 pd = IO_MEM_UNASSIGNED;
3669 } else {
3670 pd = p->phys_offset;
3671 }
3b46e624 3672
5fafdf24 3673 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3674 !(pd & IO_MEM_ROMD)) {
8df1cd07 3675        /* I/O case */
3676 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3677        if (p)
3678 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 3679        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3680 } else {
3681 /* RAM case */
5579c7f3 3682 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
8df1cd07 3683            (addr & ~TARGET_PAGE_MASK);
3684 val = ldl_p(ptr);
3685 }
3686 return val;
3687}
3688
84b7b8e7 3689/* warning: addr must be aligned */
c227f099 3690uint64_t ldq_phys(target_phys_addr_t addr)
84b7b8e7 3691{
3692 int io_index;
3693 uint8_t *ptr;
3694 uint64_t val;
3695 unsigned long pd;
3696 PhysPageDesc *p;
3697
3698 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3699 if (!p) {
3700 pd = IO_MEM_UNASSIGNED;
3701 } else {
3702 pd = p->phys_offset;
3703 }
3b46e624 3704
2a4188a3 3705    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3706 !(pd & IO_MEM_ROMD)) {
84b7b8e7 3707        /* I/O case */
3708 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3709        if (p)
3710 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7 3711#ifdef TARGET_WORDS_BIGENDIAN
3712 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3713 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3714#else
3715 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3716 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3717#endif
3718 } else {
3719 /* RAM case */
5579c7f3 3720 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
84b7b8e7 3721            (addr & ~TARGET_PAGE_MASK);
3722 val = ldq_p(ptr);
3723 }
3724 return val;
3725}
3726
aab33094 3727/* XXX: optimize */
c227f099 3728uint32_t ldub_phys(target_phys_addr_t addr)
aab33094 3729{
3730 uint8_t val;
3731 cpu_physical_memory_read(addr, &val, 1);
3732 return val;
3733}
3734
733f0b02 3735/* warning: addr must be aligned */
c227f099 3736uint32_t lduw_phys(target_phys_addr_t addr)
aab33094 3737{
733f0b02 3738    int io_index;
3739 uint8_t *ptr;
3740 uint64_t val;
3741 unsigned long pd;
3742 PhysPageDesc *p;
3743
3744 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3745 if (!p) {
3746 pd = IO_MEM_UNASSIGNED;
3747 } else {
3748 pd = p->phys_offset;
3749 }
3750
3751 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3752 !(pd & IO_MEM_ROMD)) {
3753 /* I/O case */
3754 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3755 if (p)
3756 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3757 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
3758 } else {
3759 /* RAM case */
3760 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3761 (addr & ~TARGET_PAGE_MASK);
3762 val = lduw_p(ptr);
3763 }
3764 return val;
aab33094 3765}
3766
8df1cd07 3767/* warning: addr must be aligned. The ram page is not marked as dirty
3768 and the code inside is not invalidated. It is useful if the dirty
3769 bits are used to track modified PTEs */
c227f099 3770void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
8df1cd07 3771{
3772 int io_index;
3773 uint8_t *ptr;
3774 unsigned long pd;
3775 PhysPageDesc *p;
3776
3777 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3778 if (!p) {
3779 pd = IO_MEM_UNASSIGNED;
3780 } else {
3781 pd = p->phys_offset;
3782 }
3b46e624 3783
3a7d929e 3784 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3785 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3786        if (p)
3787 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 3788        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3789 } else {
74576198 3790 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3791 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3792 stl_p(ptr, val);
74576198 3793
3794 if (unlikely(in_migration)) {
3795 if (!cpu_physical_memory_is_dirty(addr1)) {
3796 /* invalidate code */
3797 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3798 /* set dirty bit */
f7c11b53 3799                cpu_physical_memory_set_dirty_flags(
3800 addr1, (0xff & ~CODE_DIRTY_FLAG));
74576198 3801            }
3802 }
8df1cd07 3803    }
3804}
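/* Illustrative sketch, not part of the original file: the PTE-update
   pattern the comment above describes. A target MMU helper can set an
   accessed/dirty bit in a guest page table entry without marking the
   page modified, so the dirty bitmap continues to track only the
   guest's own writes. The bit value is hypothetical. */
#if 0
static void example_pte_set_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif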
3805
c227f099 3806void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
bc98a7ef 3807{
3808 int io_index;
3809 uint8_t *ptr;
3810 unsigned long pd;
3811 PhysPageDesc *p;
3812
3813 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3814 if (!p) {
3815 pd = IO_MEM_UNASSIGNED;
3816 } else {
3817 pd = p->phys_offset;
3818 }
3b46e624 3819
bc98a7ef 3820    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3821 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3822        if (p)
3823 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef 3824#ifdef TARGET_WORDS_BIGENDIAN
3825 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3826 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3827#else
3828 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3829 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3830#endif
3831 } else {
5579c7f3 3832 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bc98a7ef 3833            (addr & ~TARGET_PAGE_MASK);
3834 stq_p(ptr, val);
3835 }
3836}
3837
8df1cd07 3838/* warning: addr must be aligned */
c227f099 3839void stl_phys(target_phys_addr_t addr, uint32_t val)
8df1cd07 3840{
3841 int io_index;
3842 uint8_t *ptr;
3843 unsigned long pd;
3844 PhysPageDesc *p;
3845
3846 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3847 if (!p) {
3848 pd = IO_MEM_UNASSIGNED;
3849 } else {
3850 pd = p->phys_offset;
3851 }
3b46e624 3852
3a7d929e 3853 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3854 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3855        if (p)
3856 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07 3857        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3858 } else {
3859 unsigned long addr1;
3860 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3861 /* RAM case */
5579c7f3 3862 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3863 stl_p(ptr, val);
3a7d929e 3864        if (!cpu_physical_memory_is_dirty(addr1)) {
3865 /* invalidate code */
3866 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3867 /* set dirty bit */
f7c11b53 3868            cpu_physical_memory_set_dirty_flags(addr1,
3869 (0xff & ~CODE_DIRTY_FLAG));
3a7d929e 3870 }
8df1cd07 3871    }
3872}
3873
aab33094 3874/* XXX: optimize */
c227f099 3875void stb_phys(target_phys_addr_t addr, uint32_t val)
aab33094 3876{
3877 uint8_t v = val;
3878 cpu_physical_memory_write(addr, &v, 1);
3879}
3880
733f0b02 3881/* warning: addr must be aligned */
c227f099 3882void stw_phys(target_phys_addr_t addr, uint32_t val)
aab33094 3883{
733f0b02 3884    int io_index;
3885 uint8_t *ptr;
3886 unsigned long pd;
3887 PhysPageDesc *p;
3888
3889 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3890 if (!p) {
3891 pd = IO_MEM_UNASSIGNED;
3892 } else {
3893 pd = p->phys_offset;
3894 }
3895
3896 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3897 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3898 if (p)
3899 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3900 io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
3901 } else {
3902 unsigned long addr1;
3903 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3904 /* RAM case */
3905 ptr = qemu_get_ram_ptr(addr1);
3906 stw_p(ptr, val);
3907 if (!cpu_physical_memory_is_dirty(addr1)) {
3908 /* invalidate code */
3909 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
3910 /* set dirty bit */
3911 cpu_physical_memory_set_dirty_flags(addr1,
3912 (0xff & ~CODE_DIRTY_FLAG));
3913 }
3914 }
aab33094 3915}
3916
3917/* XXX: optimize */
c227f099 3918void stq_phys(target_phys_addr_t addr, uint64_t val)
aab33094 3919{
3920 val = tswap64(val);
3921 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3922}
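/* Illustrative sketch, not part of the original file: the ld*_phys /
   st*_phys helpers above used together on a device descriptor at a
   hypothetical address. The variants marked "addr must be aligned"
   (ldl/ldq/lduw/stw/stl) require natural alignment; stb_phys and
   stq_phys go through cpu_physical_memory_write() and do not. */
#if 0
static void example_descriptor_update(target_phys_addr_t desc)
{
    uint32_t status  = ldl_phys(desc);      /* aligned 32-bit load */
    uint64_t buf_ptr = ldq_phys(desc + 8);  /* aligned 64-bit load */

    stw_phys(desc + 4, 0xffff);             /* aligned 16-bit store */
    stb_phys(desc + 6, status & 0xff);      /* byte store */
    stq_phys(desc + 8, buf_ptr + 64);       /* byte-wise, tswap64()ed */
}
#endif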
3923
5e2972fd 3924/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 3925int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3926 uint8_t *buf, int len, int is_write)
13eb76e0 3927{
3928 int l;
c227f099 3929 target_phys_addr_t phys_addr;
9b3c35e0 3930 target_ulong page;
13eb76e0 3931
3932 while (len > 0) {
3933 page = addr & TARGET_PAGE_MASK;
3934 phys_addr = cpu_get_phys_page_debug(env, page);
3935 /* if no physical page mapped, return an error */
3936 if (phys_addr == -1)
3937 return -1;
3938 l = (page + TARGET_PAGE_SIZE) - addr;
3939 if (l > len)
3940 l = len;
5e2972fd 3941 phys_addr += (addr & ~TARGET_PAGE_MASK);
5e2972fd 3942        if (is_write)
3943 cpu_physical_memory_write_rom(phys_addr, buf, l);
3944 else
5e2972fd 3945 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
13eb76e0 3946        len -= l;
3947 buf += l;
3948 addr += l;
3949 }
3950 return 0;
3951}
a68fe89c 3952#endif
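/* Illustrative sketch, not part of the original file: a gdbstub-style
   software breakpoint inserted through the debug accessor above, which
   walks the guest page tables and can patch ROM. The opcode is
   hypothetical (0xcc is the x86 int3 encoding). */
#if 0
static int example_insert_sw_breakpoint(CPUState *env, target_ulong pc)
{
    uint8_t bp_insn = 0xcc;
    return cpu_memory_rw_debug(env, pc, &bp_insn, 1, 1);
}
#endif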
13eb76e0 3953
2e70f6ef 3954/* in deterministic execution mode, instructions doing device I/Os
3955 must be at the end of the TB */
3956void cpu_io_recompile(CPUState *env, void *retaddr)
3957{
3958 TranslationBlock *tb;
3959 uint32_t n, cflags;
3960 target_ulong pc, cs_base;
3961 uint64_t flags;
3962
3963 tb = tb_find_pc((unsigned long)retaddr);
3964 if (!tb) {
3965 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3966 retaddr);
3967 }
3968 n = env->icount_decr.u16.low + tb->icount;
3969 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3970 /* Calculate how many instructions had been executed before the fault
bf20dc07 3971 occurred. */
2e70f6ef 3972    n = n - env->icount_decr.u16.low;
3973 /* Generate a new TB ending on the I/O insn. */
3974 n++;
3975 /* On MIPS and SH, delay slot instructions can only be restarted if
3976 they were already the first instruction in the TB. If this is not
bf20dc07 3977 the first instruction in a TB then re-execute the preceding
2e70f6ef 3978       branch. */
3979#if defined(TARGET_MIPS)
3980 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3981 env->active_tc.PC -= 4;
3982 env->icount_decr.u16.low++;
3983 env->hflags &= ~MIPS_HFLAG_BMASK;
3984 }
3985#elif defined(TARGET_SH4)
3986 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3987 && n > 1) {
3988 env->pc -= 2;
3989 env->icount_decr.u16.low++;
3990 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3991 }
3992#endif
3993 /* This should never happen. */
3994 if (n > CF_COUNT_MASK)
3995 cpu_abort(env, "TB too big during recompile");
3996
3997 cflags = n | CF_LAST_IO;
3998 pc = tb->pc;
3999 cs_base = tb->cs_base;
4000 flags = tb->flags;
4001 tb_phys_invalidate(tb, -1);
4002 /* FIXME: In theory this could raise an exception. In practice
4003 we have already translated the block once so it's probably ok. */
4004 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 4005 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef 4006       the first in the TB) then we end up generating a whole new TB and
4007 repeating the fault, which is horribly inefficient.
4008 Better would be to execute just this insn uncached, or generate a
4009 second new TB. */
4010 cpu_resume_from_signal(env, NULL);
4011}
4012
b3755a91 4013#if !defined(CONFIG_USER_ONLY)
4014
e3db7226 4015void dump_exec_info(FILE *f,
4016 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
4017{
4018 int i, target_code_size, max_target_code_size;
4019 int direct_jmp_count, direct_jmp2_count, cross_page;
4020 TranslationBlock *tb;
3b46e624 4021
e3db7226 4022    target_code_size = 0;
4023 max_target_code_size = 0;
4024 cross_page = 0;
4025 direct_jmp_count = 0;
4026 direct_jmp2_count = 0;
4027 for(i = 0; i < nb_tbs; i++) {
4028 tb = &tbs[i];
4029 target_code_size += tb->size;
4030 if (tb->size > max_target_code_size)
4031 max_target_code_size = tb->size;
4032 if (tb->page_addr[1] != -1)
4033 cross_page++;
4034 if (tb->tb_next_offset[0] != 0xffff) {
4035 direct_jmp_count++;
4036 if (tb->tb_next_offset[1] != 0xffff) {
4037 direct_jmp2_count++;
4038 }
4039 }
4040 }
4041 /* XXX: avoid using doubles ? */
57fec1fe 4042 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b 4043    cpu_fprintf(f, "gen code size %ld/%ld\n",
4044 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4045 cpu_fprintf(f, "TB count %d/%d\n",
4046 nb_tbs, code_gen_max_blocks);
5fafdf24 4047 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226 4048                nb_tbs ? target_code_size / nb_tbs : 0,
4049 max_target_code_size);
5fafdf24 4050 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226 4051                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4052 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24 4053    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4054 cross_page,
e3db7226 4055                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4056 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 4057 direct_jmp_count,
e3db7226 4058                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4059 direct_jmp2_count,
4060 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 4061 cpu_fprintf(f, "\nStatistics:\n");
e3db7226 4062    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4063 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4064 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 4065 tcg_dump_info(f, cpu_fprintf);
e3db7226 4066}
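/* Illustrative sketch, not part of the original file: driving the dump
   with plain stdio. fprintf() already matches the cpu_fprintf
   signature; the monitor's "info jit" command reaches this function
   the same way with a monitor-backed printer instead. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif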
4067
61382a50 4068#define MMUSUFFIX _cmmu
4069#define GETPC() NULL
4070#define env cpu_single_env
b769d8fe 4071#define SOFTMMU_CODE_ACCESS
61382a50 4072
4073#define SHIFT 0
4074#include "softmmu_template.h"
4075
4076#define SHIFT 1
4077#include "softmmu_template.h"
4078
4079#define SHIFT 2
4080#include "softmmu_template.h"
4081
4082#define SHIFT 3
4083#include "softmmu_template.h"
4084
4085#undef env
4086
4087#endif