]> git.proxmox.com Git - mirror_qemu.git/blame - exec.c
Fix usermode virtual address type
[mirror_qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
8167ee88 17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
54936004 18 */
67b915a5 19#include "config.h"
d5a8f07c
FB
20#ifdef _WIN32
21#include <windows.h>
22#else
a98d49b1 23#include <sys/types.h>
d5a8f07c
FB
24#include <sys/mman.h>
25#endif
54936004
FB
26#include <stdlib.h>
27#include <stdio.h>
28#include <stdarg.h>
29#include <string.h>
30#include <errno.h>
31#include <unistd.h>
32#include <inttypes.h>
33
6180a181
FB
34#include "cpu.h"
35#include "exec-all.h"
ca10f867 36#include "qemu-common.h"
b67d9a52 37#include "tcg.h"
b3c7724c 38#include "hw/hw.h"
74576198 39#include "osdep.h"
7ba1e619 40#include "kvm.h"
53a5960a
PB
41#if defined(CONFIG_USER_ONLY)
42#include <qemu.h>
fd052bf6 43#include <signal.h>
53a5960a 44#endif
54936004 45
fd6ce8f6 46//#define DEBUG_TB_INVALIDATE
66e85a21 47//#define DEBUG_FLUSH
9fa3e853 48//#define DEBUG_TLB
67d3b957 49//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
50
51/* make various TB consistency checks */
5fafdf24
TS
52//#define DEBUG_TB_CHECK
53//#define DEBUG_TLB_CHECK
fd6ce8f6 54
1196be37 55//#define DEBUG_IOPORT
db7b5426 56//#define DEBUG_SUBPAGE
1196be37 57
99773bd4
PB
58#if !defined(CONFIG_USER_ONLY)
59/* TB consistency checks only implemented for usermode emulation. */
60#undef DEBUG_TB_CHECK
61#endif
62
9fa3e853
FB
63#define SMC_BITMAP_USE_THRESHOLD 10
64
bdaf78e0 65static TranslationBlock *tbs;
26a5f13b 66int code_gen_max_blocks;
9fa3e853 67TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 68static int nb_tbs;
eb51d102 69/* any access to the tbs or the page table must use this lock */
c227f099 70spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 71
141ac468
BS
72#if defined(__arm__) || defined(__sparc_v9__)
73/* The prologue must be reachable with a direct jump. ARM and Sparc64
74 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
75 section close to code segment. */
76#define code_gen_section \
77 __attribute__((__section__(".gen_code"))) \
78 __attribute__((aligned (32)))
f8e2af11
SW
79#elif defined(_WIN32)
80/* Maximum alignment for Win32 is 16. */
81#define code_gen_section \
82 __attribute__((aligned (16)))
d03d860b
BS
83#else
84#define code_gen_section \
85 __attribute__((aligned (32)))
86#endif
87
88uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
89static uint8_t *code_gen_buffer;
90static unsigned long code_gen_buffer_size;
26a5f13b 91/* threshold to flush the translated code buffer */
bdaf78e0 92static unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
93uint8_t *code_gen_ptr;
94
e2eef170 95#if !defined(CONFIG_USER_ONLY)
9fa3e853 96int phys_ram_fd;
1ccde1cb 97uint8_t *phys_ram_dirty;
74576198 98static int in_migration;
94a6b54f
PB
99
100typedef struct RAMBlock {
101 uint8_t *host;
c227f099
AL
102 ram_addr_t offset;
103 ram_addr_t length;
94a6b54f
PB
104 struct RAMBlock *next;
105} RAMBlock;
106
107static RAMBlock *ram_blocks;
108/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
ccbb4d44 109 then we can no longer assume contiguous ram offsets, and external uses
94a6b54f 110 of this variable will break. */
c227f099 111ram_addr_t last_ram_offset;
e2eef170 112#endif
9fa3e853 113
6a00d601
FB
114CPUState *first_cpu;
115/* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
5fafdf24 117CPUState *cpu_single_env;
2e70f6ef 118/* 0 = Do not count executed instructions.
bf20dc07 119 1 = Precise instruction counting.
2e70f6ef
PB
120 2 = Adaptive rate instruction counting. */
121int use_icount = 0;
122/* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124int64_t qemu_icount;
6a00d601 125
54936004 126typedef struct PageDesc {
92e873b9 127 /* list of TBs intersecting this ram page */
fd6ce8f6 128 TranslationBlock *first_tb;
9fa3e853
FB
129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133#if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135#endif
54936004
FB
136} PageDesc;
137
41c1b1c9 138/* In system mode we want L1_MAP to be based on ram offsets,
5cd2c5b6
RH
139 while in user mode we want it to be based on virtual addresses. */
140#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
141#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
143#else
5cd2c5b6 144# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
41c1b1c9 145#endif
bedb69ea 146#else
5cd2c5b6 147# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
bedb69ea 148#endif
54936004 149
5cd2c5b6
RH
150/* Size of the L2 (and L3, etc) page tables. */
151#define L2_BITS 10
54936004
FB
152#define L2_SIZE (1 << L2_BITS)
153
5cd2c5b6
RH
154/* The bits remaining after N lower levels of page tables. */
155#define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157#define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
159
160/* Size of the L1 page table. Avoid silly small sizes. */
161#if P_L1_BITS_REM < 4
162#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
163#else
164#define P_L1_BITS P_L1_BITS_REM
165#endif
166
167#if V_L1_BITS_REM < 4
168#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
169#else
170#define V_L1_BITS V_L1_BITS_REM
171#endif
172
173#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
175
176#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
83fb7adf
FB
179unsigned long qemu_real_host_page_size;
180unsigned long qemu_host_page_bits;
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
54936004 183
5cd2c5b6
RH
184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
54936004 187
e2eef170 188#if !defined(CONFIG_USER_ONLY)
41c1b1c9
PB
189typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193} PhysPageDesc;
194
5cd2c5b6
RH
195/* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197static void *l1_phys_map[P_L1_SIZE];
6d9a1304 198
e2eef170
PB
199static void io_mem_init(void);
200
33417e70 201/* io memory support */
33417e70
FB
202CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 204void *io_mem_opaque[IO_MEM_NB_ENTRIES];
511d2b14 205static char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
206static int io_mem_watch;
207#endif
33417e70 208
34865134 209/* log support */
1e8b27ca
JR
210#ifdef WIN32
211static const char *logfilename = "qemu.log";
212#else
d9b630fd 213static const char *logfilename = "/tmp/qemu.log";
1e8b27ca 214#endif
34865134
FB
215FILE *logfile;
216int loglevel;
e735b91c 217static int log_append = 0;
34865134 218
e3db7226 219/* statistics */
b3755a91 220#if !defined(CONFIG_USER_ONLY)
e3db7226 221static int tlb_flush_count;
b3755a91 222#endif
e3db7226
FB
223static int tb_flush_count;
224static int tb_phys_invalidate_count;
225
7cb69cae
FB
226#ifdef _WIN32
227static void map_exec(void *addr, long size)
228{
229 DWORD old_protect;
230 VirtualProtect(addr, size,
231 PAGE_EXECUTE_READWRITE, &old_protect);
232
233}
234#else
235static void map_exec(void *addr, long size)
236{
4369415f 237 unsigned long start, end, page_size;
7cb69cae 238
4369415f 239 page_size = getpagesize();
7cb69cae 240 start = (unsigned long)addr;
4369415f 241 start &= ~(page_size - 1);
7cb69cae
FB
242
243 end = (unsigned long)addr + size;
4369415f
FB
244 end += page_size - 1;
245 end &= ~(page_size - 1);
7cb69cae
FB
246
247 mprotect((void *)start, end - start,
248 PROT_READ | PROT_WRITE | PROT_EXEC);
249}
250#endif
251
b346ff46 252static void page_init(void)
54936004 253{
83fb7adf 254 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 255 TARGET_PAGE_SIZE */
c2b48b69
AL
256#ifdef _WIN32
257 {
258 SYSTEM_INFO system_info;
259
260 GetSystemInfo(&system_info);
261 qemu_real_host_page_size = system_info.dwPageSize;
262 }
263#else
264 qemu_real_host_page_size = getpagesize();
265#endif
83fb7adf
FB
266 if (qemu_host_page_size == 0)
267 qemu_host_page_size = qemu_real_host_page_size;
268 if (qemu_host_page_size < TARGET_PAGE_SIZE)
269 qemu_host_page_size = TARGET_PAGE_SIZE;
270 qemu_host_page_bits = 0;
271 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
272 qemu_host_page_bits++;
273 qemu_host_page_mask = ~(qemu_host_page_size - 1);
50a9569b
AZ
274
275#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
276 {
50a9569b 277 FILE *f;
50a9569b 278
0776590d 279 last_brk = (unsigned long)sbrk(0);
5cd2c5b6 280
50a9569b
AZ
281 f = fopen("/proc/self/maps", "r");
282 if (f) {
5cd2c5b6
RH
283 mmap_lock();
284
50a9569b 285 do {
5cd2c5b6
RH
286 unsigned long startaddr, endaddr;
287 int n;
288
289 n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
290
291 if (n == 2 && h2g_valid(startaddr)) {
292 startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
293
294 if (h2g_valid(endaddr)) {
295 endaddr = h2g(endaddr);
296 } else {
297 endaddr = ~0ul;
298 }
299 page_set_flags(startaddr, endaddr, PAGE_RESERVED);
50a9569b
AZ
300 }
301 } while (!feof(f));
5cd2c5b6 302
50a9569b 303 fclose(f);
5cd2c5b6 304 mmap_unlock();
50a9569b
AZ
305 }
306 }
307#endif
54936004
FB
308}
309
41c1b1c9 310static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
54936004 311{
41c1b1c9
PB
312 PageDesc *pd;
313 void **lp;
314 int i;
315
5cd2c5b6
RH
316#if defined(CONFIG_USER_ONLY)
317 /* We can't use qemu_malloc because it may recurse into a locked mutex.
318 Neither can we record the new pages we reserve while allocating a
319 given page because that may recurse into an unallocated page table
320 entry. Stuff the allocations we do make into a queue and process
321 them after having completed one entire page table allocation. */
322
323 unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
324 int reserve_idx = 0;
325
326# define ALLOC(P, SIZE) \
327 do { \
328 P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \
329 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \
330 if (h2g_valid(P)) { \
331 reserve[reserve_idx] = h2g(P); \
332 reserve[reserve_idx + 1] = SIZE; \
333 reserve_idx += 2; \
334 } \
335 } while (0)
336#else
337# define ALLOC(P, SIZE) \
338 do { P = qemu_mallocz(SIZE); } while (0)
17e2377a 339#endif
434929bf 340
5cd2c5b6
RH
341 /* Level 1. Always allocated. */
342 lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
343
344 /* Level 2..N-1. */
345 for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
346 void **p = *lp;
347
348 if (p == NULL) {
349 if (!alloc) {
350 return NULL;
351 }
352 ALLOC(p, sizeof(void *) * L2_SIZE);
353 *lp = p;
17e2377a 354 }
5cd2c5b6
RH
355
356 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
357 }
358
359 pd = *lp;
360 if (pd == NULL) {
361 if (!alloc) {
362 return NULL;
363 }
364 ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
365 *lp = pd;
54936004 366 }
5cd2c5b6
RH
367
368#undef ALLOC
369#if defined(CONFIG_USER_ONLY)
370 for (i = 0; i < reserve_idx; i += 2) {
371 unsigned long addr = reserve[i];
372 unsigned long len = reserve[i + 1];
373
374 page_set_flags(addr & TARGET_PAGE_MASK,
375 TARGET_PAGE_ALIGN(addr + len),
376 PAGE_RESERVED);
377 }
378#endif
379
380 return pd + (index & (L2_SIZE - 1));
54936004
FB
381}
382
41c1b1c9 383static inline PageDesc *page_find(tb_page_addr_t index)
54936004 384{
5cd2c5b6 385 return page_find_alloc(index, 0);
fd6ce8f6
FB
386}
387
6d9a1304 388#if !defined(CONFIG_USER_ONLY)
c227f099 389static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 390{
e3f4e2a4 391 PhysPageDesc *pd;
5cd2c5b6
RH
392 void **lp;
393 int i;
92e873b9 394
5cd2c5b6
RH
395 /* Level 1. Always allocated. */
396 lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
108c49b8 397
5cd2c5b6
RH
398 /* Level 2..N-1. */
399 for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
400 void **p = *lp;
401 if (p == NULL) {
402 if (!alloc) {
403 return NULL;
404 }
405 *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
406 }
407 lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
108c49b8 408 }
5cd2c5b6 409
e3f4e2a4 410 pd = *lp;
5cd2c5b6 411 if (pd == NULL) {
e3f4e2a4 412 int i;
5cd2c5b6
RH
413
414 if (!alloc) {
108c49b8 415 return NULL;
5cd2c5b6
RH
416 }
417
418 *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
419
67c4d23c 420 for (i = 0; i < L2_SIZE; i++) {
5cd2c5b6
RH
421 pd[i].phys_offset = IO_MEM_UNASSIGNED;
422 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
67c4d23c 423 }
92e873b9 424 }
5cd2c5b6
RH
425
426 return pd + (index & (L2_SIZE - 1));
92e873b9
FB
427}
428
c227f099 429static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 430{
108c49b8 431 return phys_page_find_alloc(index, 0);
92e873b9
FB
432}
433
c227f099
AL
434static void tlb_protect_code(ram_addr_t ram_addr);
435static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 436 target_ulong vaddr);
c8a706fe
PB
437#define mmap_lock() do { } while(0)
438#define mmap_unlock() do { } while(0)
9fa3e853 439#endif
fd6ce8f6 440
4369415f
FB
441#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
442
443#if defined(CONFIG_USER_ONLY)
ccbb4d44 444/* Currently it is not recommended to allocate big chunks of data in
4369415f
FB
445 user mode. It will change when a dedicated libc will be used */
446#define USE_STATIC_CODE_GEN_BUFFER
447#endif
448
449#ifdef USE_STATIC_CODE_GEN_BUFFER
450static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
451#endif
452
8fcd3692 453static void code_gen_alloc(unsigned long tb_size)
26a5f13b 454{
4369415f
FB
455#ifdef USE_STATIC_CODE_GEN_BUFFER
456 code_gen_buffer = static_code_gen_buffer;
457 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
458 map_exec(code_gen_buffer, code_gen_buffer_size);
459#else
26a5f13b
FB
460 code_gen_buffer_size = tb_size;
461 if (code_gen_buffer_size == 0) {
4369415f
FB
462#if defined(CONFIG_USER_ONLY)
463 /* in user mode, phys_ram_size is not meaningful */
464 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
465#else
ccbb4d44 466 /* XXX: needs adjustments */
94a6b54f 467 code_gen_buffer_size = (unsigned long)(ram_size / 4);
4369415f 468#endif
26a5f13b
FB
469 }
470 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
471 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
472 /* The code gen buffer location may have constraints depending on
473 the host cpu and OS */
474#if defined(__linux__)
475 {
476 int flags;
141ac468
BS
477 void *start = NULL;
478
26a5f13b
FB
479 flags = MAP_PRIVATE | MAP_ANONYMOUS;
480#if defined(__x86_64__)
481 flags |= MAP_32BIT;
482 /* Cannot map more than that */
483 if (code_gen_buffer_size > (800 * 1024 * 1024))
484 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
485#elif defined(__sparc_v9__)
486 // Map the buffer below 2G, so we can use direct calls and branches
487 flags |= MAP_FIXED;
488 start = (void *) 0x60000000UL;
489 if (code_gen_buffer_size > (512 * 1024 * 1024))
490 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 491#elif defined(__arm__)
63d41246 492 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
493 flags |= MAP_FIXED;
494 start = (void *) 0x01000000UL;
495 if (code_gen_buffer_size > 16 * 1024 * 1024)
496 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 497#endif
141ac468
BS
498 code_gen_buffer = mmap(start, code_gen_buffer_size,
499 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
500 flags, -1, 0);
501 if (code_gen_buffer == MAP_FAILED) {
502 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
503 exit(1);
504 }
505 }
a167ba50 506#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
06e67a82
AL
507 {
508 int flags;
509 void *addr = NULL;
510 flags = MAP_PRIVATE | MAP_ANONYMOUS;
511#if defined(__x86_64__)
512 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
513 * 0x40000000 is free */
514 flags |= MAP_FIXED;
515 addr = (void *)0x40000000;
516 /* Cannot map more than that */
517 if (code_gen_buffer_size > (800 * 1024 * 1024))
518 code_gen_buffer_size = (800 * 1024 * 1024);
519#endif
520 code_gen_buffer = mmap(addr, code_gen_buffer_size,
521 PROT_WRITE | PROT_READ | PROT_EXEC,
522 flags, -1, 0);
523 if (code_gen_buffer == MAP_FAILED) {
524 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
525 exit(1);
526 }
527 }
26a5f13b
FB
528#else
529 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b
FB
530 map_exec(code_gen_buffer, code_gen_buffer_size);
531#endif
4369415f 532#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
533 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
534 code_gen_buffer_max_size = code_gen_buffer_size -
535 code_gen_max_block_size();
536 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
537 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
538}
539
540/* Must be called before using the QEMU cpus. 'tb_size' is the size
541 (in bytes) allocated to the translation buffer. Zero means default
542 size. */
543void cpu_exec_init_all(unsigned long tb_size)
544{
26a5f13b
FB
545 cpu_gen_init();
546 code_gen_alloc(tb_size);
547 code_gen_ptr = code_gen_buffer;
4369415f 548 page_init();
e2eef170 549#if !defined(CONFIG_USER_ONLY)
26a5f13b 550 io_mem_init();
e2eef170 551#endif
26a5f13b
FB
552}
553
9656f324
PB
554#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555
e59fb374 556static int cpu_common_post_load(void *opaque, int version_id)
e7f4eff7
JQ
557{
558 CPUState *env = opaque;
9656f324 559
3098dba0
AJ
560 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
561 version_id is increased. */
562 env->interrupt_request &= ~0x01;
9656f324
PB
563 tlb_flush(env, 1);
564
565 return 0;
566}
e7f4eff7
JQ
567
568static const VMStateDescription vmstate_cpu_common = {
569 .name = "cpu_common",
570 .version_id = 1,
571 .minimum_version_id = 1,
572 .minimum_version_id_old = 1,
e7f4eff7
JQ
573 .post_load = cpu_common_post_load,
574 .fields = (VMStateField []) {
575 VMSTATE_UINT32(halted, CPUState),
576 VMSTATE_UINT32(interrupt_request, CPUState),
577 VMSTATE_END_OF_LIST()
578 }
579};
9656f324
PB
580#endif
581
950f1472
GC
582CPUState *qemu_get_cpu(int cpu)
583{
584 CPUState *env = first_cpu;
585
586 while (env) {
587 if (env->cpu_index == cpu)
588 break;
589 env = env->next_cpu;
590 }
591
592 return env;
593}
594
6a00d601 595void cpu_exec_init(CPUState *env)
fd6ce8f6 596{
6a00d601
FB
597 CPUState **penv;
598 int cpu_index;
599
c2764719
PB
600#if defined(CONFIG_USER_ONLY)
601 cpu_list_lock();
602#endif
6a00d601
FB
603 env->next_cpu = NULL;
604 penv = &first_cpu;
605 cpu_index = 0;
606 while (*penv != NULL) {
1e9fa730 607 penv = &(*penv)->next_cpu;
6a00d601
FB
608 cpu_index++;
609 }
610 env->cpu_index = cpu_index;
268a362c 611 env->numa_node = 0;
72cf2d4f
BS
612 QTAILQ_INIT(&env->breakpoints);
613 QTAILQ_INIT(&env->watchpoints);
6a00d601 614 *penv = env;
c2764719
PB
615#if defined(CONFIG_USER_ONLY)
616 cpu_list_unlock();
617#endif
b3c7724c 618#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
e7f4eff7 619 vmstate_register(cpu_index, &vmstate_cpu_common, env);
b3c7724c
PB
620 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
621 cpu_save, cpu_load, env);
622#endif
fd6ce8f6
FB
623}
624
9fa3e853
FB
625static inline void invalidate_page_bitmap(PageDesc *p)
626{
627 if (p->code_bitmap) {
59817ccb 628 qemu_free(p->code_bitmap);
9fa3e853
FB
629 p->code_bitmap = NULL;
630 }
631 p->code_write_count = 0;
632}
633
5cd2c5b6
RH
634/* Set to NULL all the 'first_tb' fields in all PageDescs. */
635
636static void page_flush_tb_1 (int level, void **lp)
fd6ce8f6 637{
5cd2c5b6 638 int i;
fd6ce8f6 639
5cd2c5b6
RH
640 if (*lp == NULL) {
641 return;
642 }
643 if (level == 0) {
644 PageDesc *pd = *lp;
645 for (i = 0; i < L2_BITS; ++i) {
646 pd[i].first_tb = NULL;
647 invalidate_page_bitmap(pd + i);
fd6ce8f6 648 }
5cd2c5b6
RH
649 } else {
650 void **pp = *lp;
651 for (i = 0; i < L2_BITS; ++i) {
652 page_flush_tb_1 (level - 1, pp + i);
653 }
654 }
655}
656
657static void page_flush_tb(void)
658{
659 int i;
660 for (i = 0; i < V_L1_SIZE; i++) {
661 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
fd6ce8f6
FB
662 }
663}
664
665/* flush all the translation blocks */
d4e8164f 666/* XXX: tb_flush is currently not thread safe */
6a00d601 667void tb_flush(CPUState *env1)
fd6ce8f6 668{
6a00d601 669 CPUState *env;
0124311e 670#if defined(DEBUG_FLUSH)
ab3d1727
BS
671 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
672 (unsigned long)(code_gen_ptr - code_gen_buffer),
673 nb_tbs, nb_tbs > 0 ?
674 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 675#endif
26a5f13b 676 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
677 cpu_abort(env1, "Internal error: code buffer overflow\n");
678
fd6ce8f6 679 nb_tbs = 0;
3b46e624 680
6a00d601
FB
681 for(env = first_cpu; env != NULL; env = env->next_cpu) {
682 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
683 }
9fa3e853 684
8a8a608f 685 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 686 page_flush_tb();
9fa3e853 687
fd6ce8f6 688 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
689 /* XXX: flush processor icache at this point if cache flush is
690 expensive */
e3db7226 691 tb_flush_count++;
fd6ce8f6
FB
692}
693
694#ifdef DEBUG_TB_CHECK
695
bc98a7ef 696static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
697{
698 TranslationBlock *tb;
699 int i;
700 address &= TARGET_PAGE_MASK;
99773bd4
PB
701 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
702 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
703 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
704 address >= tb->pc + tb->size)) {
0bf9e31a
BS
705 printf("ERROR invalidate: address=" TARGET_FMT_lx
706 " PC=%08lx size=%04x\n",
99773bd4 707 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
708 }
709 }
710 }
711}
712
713/* verify that all the pages have correct rights for code */
714static void tb_page_check(void)
715{
716 TranslationBlock *tb;
717 int i, flags1, flags2;
3b46e624 718
99773bd4
PB
719 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
720 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
721 flags1 = page_get_flags(tb->pc);
722 flags2 = page_get_flags(tb->pc + tb->size - 1);
723 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
724 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 725 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
726 }
727 }
728 }
729}
730
731#endif
732
733/* invalidate one TB */
734static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
735 int next_offset)
736{
737 TranslationBlock *tb1;
738 for(;;) {
739 tb1 = *ptb;
740 if (tb1 == tb) {
741 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
742 break;
743 }
744 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
745 }
746}
747
9fa3e853
FB
748static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
749{
750 TranslationBlock *tb1;
751 unsigned int n1;
752
753 for(;;) {
754 tb1 = *ptb;
755 n1 = (long)tb1 & 3;
756 tb1 = (TranslationBlock *)((long)tb1 & ~3);
757 if (tb1 == tb) {
758 *ptb = tb1->page_next[n1];
759 break;
760 }
761 ptb = &tb1->page_next[n1];
762 }
763}
764
d4e8164f
FB
765static inline void tb_jmp_remove(TranslationBlock *tb, int n)
766{
767 TranslationBlock *tb1, **ptb;
768 unsigned int n1;
769
770 ptb = &tb->jmp_next[n];
771 tb1 = *ptb;
772 if (tb1) {
773 /* find tb(n) in circular list */
774 for(;;) {
775 tb1 = *ptb;
776 n1 = (long)tb1 & 3;
777 tb1 = (TranslationBlock *)((long)tb1 & ~3);
778 if (n1 == n && tb1 == tb)
779 break;
780 if (n1 == 2) {
781 ptb = &tb1->jmp_first;
782 } else {
783 ptb = &tb1->jmp_next[n1];
784 }
785 }
786 /* now we can suppress tb(n) from the list */
787 *ptb = tb->jmp_next[n];
788
789 tb->jmp_next[n] = NULL;
790 }
791}
792
793/* reset the jump entry 'n' of a TB so that it is not chained to
794 another TB */
795static inline void tb_reset_jump(TranslationBlock *tb, int n)
796{
797 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
798}
799
41c1b1c9 800void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
fd6ce8f6 801{
6a00d601 802 CPUState *env;
8a40a180 803 PageDesc *p;
d4e8164f 804 unsigned int h, n1;
41c1b1c9 805 tb_page_addr_t phys_pc;
8a40a180 806 TranslationBlock *tb1, *tb2;
3b46e624 807
8a40a180
FB
808 /* remove the TB from the hash list */
809 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
810 h = tb_phys_hash_func(phys_pc);
5fafdf24 811 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
812 offsetof(TranslationBlock, phys_hash_next));
813
814 /* remove the TB from the page list */
815 if (tb->page_addr[0] != page_addr) {
816 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
817 tb_page_remove(&p->first_tb, tb);
818 invalidate_page_bitmap(p);
819 }
820 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
821 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
822 tb_page_remove(&p->first_tb, tb);
823 invalidate_page_bitmap(p);
824 }
825
36bdbe54 826 tb_invalidated_flag = 1;
59817ccb 827
fd6ce8f6 828 /* remove the TB from the hash list */
8a40a180 829 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
830 for(env = first_cpu; env != NULL; env = env->next_cpu) {
831 if (env->tb_jmp_cache[h] == tb)
832 env->tb_jmp_cache[h] = NULL;
833 }
d4e8164f
FB
834
835 /* suppress this TB from the two jump lists */
836 tb_jmp_remove(tb, 0);
837 tb_jmp_remove(tb, 1);
838
839 /* suppress any remaining jumps to this TB */
840 tb1 = tb->jmp_first;
841 for(;;) {
842 n1 = (long)tb1 & 3;
843 if (n1 == 2)
844 break;
845 tb1 = (TranslationBlock *)((long)tb1 & ~3);
846 tb2 = tb1->jmp_next[n1];
847 tb_reset_jump(tb1, n1);
848 tb1->jmp_next[n1] = NULL;
849 tb1 = tb2;
850 }
851 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 852
e3db7226 853 tb_phys_invalidate_count++;
9fa3e853
FB
854}
855
856static inline void set_bits(uint8_t *tab, int start, int len)
857{
858 int end, mask, end1;
859
860 end = start + len;
861 tab += start >> 3;
862 mask = 0xff << (start & 7);
863 if ((start & ~7) == (end & ~7)) {
864 if (start < end) {
865 mask &= ~(0xff << (end & 7));
866 *tab |= mask;
867 }
868 } else {
869 *tab++ |= mask;
870 start = (start + 8) & ~7;
871 end1 = end & ~7;
872 while (start < end1) {
873 *tab++ = 0xff;
874 start += 8;
875 }
876 if (start < end) {
877 mask = ~(0xff << (end & 7));
878 *tab |= mask;
879 }
880 }
881}
882
883static void build_page_bitmap(PageDesc *p)
884{
885 int n, tb_start, tb_end;
886 TranslationBlock *tb;
3b46e624 887
b2a7081a 888 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
889
890 tb = p->first_tb;
891 while (tb != NULL) {
892 n = (long)tb & 3;
893 tb = (TranslationBlock *)((long)tb & ~3);
894 /* NOTE: this is subtle as a TB may span two physical pages */
895 if (n == 0) {
896 /* NOTE: tb_end may be after the end of the page, but
897 it is not a problem */
898 tb_start = tb->pc & ~TARGET_PAGE_MASK;
899 tb_end = tb_start + tb->size;
900 if (tb_end > TARGET_PAGE_SIZE)
901 tb_end = TARGET_PAGE_SIZE;
902 } else {
903 tb_start = 0;
904 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
905 }
906 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
907 tb = tb->page_next[n];
908 }
909}
910
2e70f6ef
PB
911TranslationBlock *tb_gen_code(CPUState *env,
912 target_ulong pc, target_ulong cs_base,
913 int flags, int cflags)
d720b93d
FB
914{
915 TranslationBlock *tb;
916 uint8_t *tc_ptr;
41c1b1c9
PB
917 tb_page_addr_t phys_pc, phys_page2;
918 target_ulong virt_page2;
d720b93d
FB
919 int code_gen_size;
920
41c1b1c9 921 phys_pc = get_page_addr_code(env, pc);
c27004ec 922 tb = tb_alloc(pc);
d720b93d
FB
923 if (!tb) {
924 /* flush must be done */
925 tb_flush(env);
926 /* cannot fail at this point */
c27004ec 927 tb = tb_alloc(pc);
2e70f6ef
PB
928 /* Don't forget to invalidate previous TB info. */
929 tb_invalidated_flag = 1;
d720b93d
FB
930 }
931 tc_ptr = code_gen_ptr;
932 tb->tc_ptr = tc_ptr;
933 tb->cs_base = cs_base;
934 tb->flags = flags;
935 tb->cflags = cflags;
d07bde88 936 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 937 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 938
d720b93d 939 /* check next page if needed */
c27004ec 940 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 941 phys_page2 = -1;
c27004ec 942 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
41c1b1c9 943 phys_page2 = get_page_addr_code(env, virt_page2);
d720b93d 944 }
41c1b1c9 945 tb_link_page(tb, phys_pc, phys_page2);
2e70f6ef 946 return tb;
d720b93d 947}
3b46e624 948
9fa3e853
FB
949/* invalidate all TBs which intersect with the target physical page
950 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
951 the same physical page. 'is_cpu_write_access' should be true if called
952 from a real cpu write access: the virtual CPU will exit the current
953 TB if code is modified inside this TB. */
41c1b1c9 954void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
d720b93d
FB
955 int is_cpu_write_access)
956{
6b917547 957 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 958 CPUState *env = cpu_single_env;
41c1b1c9 959 tb_page_addr_t tb_start, tb_end;
6b917547
AL
960 PageDesc *p;
961 int n;
962#ifdef TARGET_HAS_PRECISE_SMC
963 int current_tb_not_found = is_cpu_write_access;
964 TranslationBlock *current_tb = NULL;
965 int current_tb_modified = 0;
966 target_ulong current_pc = 0;
967 target_ulong current_cs_base = 0;
968 int current_flags = 0;
969#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
970
971 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 972 if (!p)
9fa3e853 973 return;
5fafdf24 974 if (!p->code_bitmap &&
d720b93d
FB
975 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
976 is_cpu_write_access) {
9fa3e853
FB
977 /* build code bitmap */
978 build_page_bitmap(p);
979 }
980
981 /* we remove all the TBs in the range [start, end[ */
982 /* XXX: see if in some cases it could be faster to invalidate all the code */
983 tb = p->first_tb;
984 while (tb != NULL) {
985 n = (long)tb & 3;
986 tb = (TranslationBlock *)((long)tb & ~3);
987 tb_next = tb->page_next[n];
988 /* NOTE: this is subtle as a TB may span two physical pages */
989 if (n == 0) {
990 /* NOTE: tb_end may be after the end of the page, but
991 it is not a problem */
992 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
993 tb_end = tb_start + tb->size;
994 } else {
995 tb_start = tb->page_addr[1];
996 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
997 }
998 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
999#ifdef TARGET_HAS_PRECISE_SMC
1000 if (current_tb_not_found) {
1001 current_tb_not_found = 0;
1002 current_tb = NULL;
2e70f6ef 1003 if (env->mem_io_pc) {
d720b93d 1004 /* now we have a real cpu fault */
2e70f6ef 1005 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
1006 }
1007 }
1008 if (current_tb == tb &&
2e70f6ef 1009 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1010 /* If we are modifying the current TB, we must stop
1011 its execution. We could be more precise by checking
1012 that the modification is after the current PC, but it
1013 would require a specialized function to partially
1014 restore the CPU state */
3b46e624 1015
d720b93d 1016 current_tb_modified = 1;
5fafdf24 1017 cpu_restore_state(current_tb, env,
2e70f6ef 1018 env->mem_io_pc, NULL);
6b917547
AL
1019 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1020 &current_flags);
d720b93d
FB
1021 }
1022#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
1023 /* we need to do that to handle the case where a signal
1024 occurs while doing tb_phys_invalidate() */
1025 saved_tb = NULL;
1026 if (env) {
1027 saved_tb = env->current_tb;
1028 env->current_tb = NULL;
1029 }
9fa3e853 1030 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
1031 if (env) {
1032 env->current_tb = saved_tb;
1033 if (env->interrupt_request && env->current_tb)
1034 cpu_interrupt(env, env->interrupt_request);
1035 }
9fa3e853
FB
1036 }
1037 tb = tb_next;
1038 }
1039#if !defined(CONFIG_USER_ONLY)
1040 /* if no code remaining, no need to continue to use slow writes */
1041 if (!p->first_tb) {
1042 invalidate_page_bitmap(p);
d720b93d 1043 if (is_cpu_write_access) {
2e70f6ef 1044 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
1045 }
1046 }
1047#endif
1048#ifdef TARGET_HAS_PRECISE_SMC
1049 if (current_tb_modified) {
1050 /* we generate a block containing just the instruction
1051 modifying the memory. It will ensure that it cannot modify
1052 itself */
ea1c1802 1053 env->current_tb = NULL;
2e70f6ef 1054 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1055 cpu_resume_from_signal(env, NULL);
9fa3e853 1056 }
fd6ce8f6 1057#endif
9fa3e853 1058}
fd6ce8f6 1059
9fa3e853 1060/* len must be <= 8 and start must be a multiple of len */
41c1b1c9 1061static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
9fa3e853
FB
1062{
1063 PageDesc *p;
1064 int offset, b;
59817ccb 1065#if 0
a4193c8a 1066 if (1) {
93fcfe39
AL
1067 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1068 cpu_single_env->mem_io_vaddr, len,
1069 cpu_single_env->eip,
1070 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1071 }
1072#endif
9fa3e853 1073 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1074 if (!p)
9fa3e853
FB
1075 return;
1076 if (p->code_bitmap) {
1077 offset = start & ~TARGET_PAGE_MASK;
1078 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1079 if (b & ((1 << len) - 1))
1080 goto do_invalidate;
1081 } else {
1082 do_invalidate:
d720b93d 1083 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1084 }
1085}
1086
9fa3e853 1087#if !defined(CONFIG_SOFTMMU)
41c1b1c9 1088static void tb_invalidate_phys_page(tb_page_addr_t addr,
d720b93d 1089 unsigned long pc, void *puc)
9fa3e853 1090{
6b917547 1091 TranslationBlock *tb;
9fa3e853 1092 PageDesc *p;
6b917547 1093 int n;
d720b93d 1094#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1095 TranslationBlock *current_tb = NULL;
d720b93d 1096 CPUState *env = cpu_single_env;
6b917547
AL
1097 int current_tb_modified = 0;
1098 target_ulong current_pc = 0;
1099 target_ulong current_cs_base = 0;
1100 int current_flags = 0;
d720b93d 1101#endif
9fa3e853
FB
1102
1103 addr &= TARGET_PAGE_MASK;
1104 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1105 if (!p)
9fa3e853
FB
1106 return;
1107 tb = p->first_tb;
d720b93d
FB
1108#ifdef TARGET_HAS_PRECISE_SMC
1109 if (tb && pc != 0) {
1110 current_tb = tb_find_pc(pc);
1111 }
1112#endif
9fa3e853
FB
1113 while (tb != NULL) {
1114 n = (long)tb & 3;
1115 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1116#ifdef TARGET_HAS_PRECISE_SMC
1117 if (current_tb == tb &&
2e70f6ef 1118 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1119 /* If we are modifying the current TB, we must stop
1120 its execution. We could be more precise by checking
1121 that the modification is after the current PC, but it
1122 would require a specialized function to partially
1123 restore the CPU state */
3b46e624 1124
d720b93d
FB
1125 current_tb_modified = 1;
1126 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1127 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1128 &current_flags);
d720b93d
FB
1129 }
1130#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1131 tb_phys_invalidate(tb, addr);
1132 tb = tb->page_next[n];
1133 }
fd6ce8f6 1134 p->first_tb = NULL;
d720b93d
FB
1135#ifdef TARGET_HAS_PRECISE_SMC
1136 if (current_tb_modified) {
1137 /* we generate a block containing just the instruction
1138 modifying the memory. It will ensure that it cannot modify
1139 itself */
ea1c1802 1140 env->current_tb = NULL;
2e70f6ef 1141 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1142 cpu_resume_from_signal(env, puc);
1143 }
1144#endif
fd6ce8f6 1145}
9fa3e853 1146#endif
fd6ce8f6
FB
1147
1148/* add the tb in the target page and protect it if necessary */
5fafdf24 1149static inline void tb_alloc_page(TranslationBlock *tb,
41c1b1c9 1150 unsigned int n, tb_page_addr_t page_addr)
fd6ce8f6
FB
1151{
1152 PageDesc *p;
9fa3e853
FB
1153 TranslationBlock *last_first_tb;
1154
1155 tb->page_addr[n] = page_addr;
5cd2c5b6 1156 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
9fa3e853
FB
1157 tb->page_next[n] = p->first_tb;
1158 last_first_tb = p->first_tb;
1159 p->first_tb = (TranslationBlock *)((long)tb | n);
1160 invalidate_page_bitmap(p);
fd6ce8f6 1161
107db443 1162#if defined(TARGET_HAS_SMC) || 1
d720b93d 1163
9fa3e853 1164#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1165 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1166 target_ulong addr;
1167 PageDesc *p2;
9fa3e853
FB
1168 int prot;
1169
fd6ce8f6
FB
1170 /* force the host page as non writable (writes will have a
1171 page fault + mprotect overhead) */
53a5960a 1172 page_addr &= qemu_host_page_mask;
fd6ce8f6 1173 prot = 0;
53a5960a
PB
1174 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1175 addr += TARGET_PAGE_SIZE) {
1176
1177 p2 = page_find (addr >> TARGET_PAGE_BITS);
1178 if (!p2)
1179 continue;
1180 prot |= p2->flags;
1181 p2->flags &= ~PAGE_WRITE;
1182 page_get_flags(addr);
1183 }
5fafdf24 1184 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1185 (prot & PAGE_BITS) & ~PAGE_WRITE);
1186#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1187 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1188 page_addr);
fd6ce8f6 1189#endif
fd6ce8f6 1190 }
9fa3e853
FB
1191#else
1192 /* if some code is already present, then the pages are already
1193 protected. So we handle the case where only the first TB is
1194 allocated in a physical page */
1195 if (!last_first_tb) {
6a00d601 1196 tlb_protect_code(page_addr);
9fa3e853
FB
1197 }
1198#endif
d720b93d
FB
1199
1200#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1201}
1202
1203/* Allocate a new translation block. Flush the translation buffer if
1204 too many translation blocks or too much generated code. */
c27004ec 1205TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1206{
1207 TranslationBlock *tb;
fd6ce8f6 1208
26a5f13b
FB
1209 if (nb_tbs >= code_gen_max_blocks ||
1210 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1211 return NULL;
fd6ce8f6
FB
1212 tb = &tbs[nb_tbs++];
1213 tb->pc = pc;
b448f2f3 1214 tb->cflags = 0;
d4e8164f
FB
1215 return tb;
1216}
1217
2e70f6ef
PB
1218void tb_free(TranslationBlock *tb)
1219{
bf20dc07 1220 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1221 Ignore the hard cases and just back up if this TB happens to
1222 be the last one generated. */
1223 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1224 code_gen_ptr = tb->tc_ptr;
1225 nb_tbs--;
1226 }
1227}
1228
9fa3e853
FB
1229/* add a new TB and link it to the physical page tables. phys_page2 is
1230 (-1) to indicate that only one page contains the TB. */
41c1b1c9
PB
1231void tb_link_page(TranslationBlock *tb,
1232 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
d4e8164f 1233{
9fa3e853
FB
1234 unsigned int h;
1235 TranslationBlock **ptb;
1236
c8a706fe
PB
1237 /* Grab the mmap lock to stop another thread invalidating this TB
1238 before we are done. */
1239 mmap_lock();
9fa3e853
FB
1240 /* add in the physical hash table */
1241 h = tb_phys_hash_func(phys_pc);
1242 ptb = &tb_phys_hash[h];
1243 tb->phys_hash_next = *ptb;
1244 *ptb = tb;
fd6ce8f6
FB
1245
1246 /* add in the page list */
9fa3e853
FB
1247 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1248 if (phys_page2 != -1)
1249 tb_alloc_page(tb, 1, phys_page2);
1250 else
1251 tb->page_addr[1] = -1;
9fa3e853 1252
d4e8164f
FB
1253 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1254 tb->jmp_next[0] = NULL;
1255 tb->jmp_next[1] = NULL;
1256
1257 /* init original jump addresses */
1258 if (tb->tb_next_offset[0] != 0xffff)
1259 tb_reset_jump(tb, 0);
1260 if (tb->tb_next_offset[1] != 0xffff)
1261 tb_reset_jump(tb, 1);
8a40a180
FB
1262
1263#ifdef DEBUG_TB_CHECK
1264 tb_page_check();
1265#endif
c8a706fe 1266 mmap_unlock();
fd6ce8f6
FB
1267}
1268
9fa3e853
FB
1269/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1270 tb[1].tc_ptr. Return NULL if not found */
1271TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1272{
9fa3e853
FB
1273 int m_min, m_max, m;
1274 unsigned long v;
1275 TranslationBlock *tb;
a513fe19
FB
1276
1277 if (nb_tbs <= 0)
1278 return NULL;
1279 if (tc_ptr < (unsigned long)code_gen_buffer ||
1280 tc_ptr >= (unsigned long)code_gen_ptr)
1281 return NULL;
1282 /* binary search (cf Knuth) */
1283 m_min = 0;
1284 m_max = nb_tbs - 1;
1285 while (m_min <= m_max) {
1286 m = (m_min + m_max) >> 1;
1287 tb = &tbs[m];
1288 v = (unsigned long)tb->tc_ptr;
1289 if (v == tc_ptr)
1290 return tb;
1291 else if (tc_ptr < v) {
1292 m_max = m - 1;
1293 } else {
1294 m_min = m + 1;
1295 }
5fafdf24 1296 }
a513fe19
FB
1297 return &tbs[m_max];
1298}
7501267e 1299
ea041c0e
FB
1300static void tb_reset_jump_recursive(TranslationBlock *tb);
1301
1302static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1303{
1304 TranslationBlock *tb1, *tb_next, **ptb;
1305 unsigned int n1;
1306
1307 tb1 = tb->jmp_next[n];
1308 if (tb1 != NULL) {
1309 /* find head of list */
1310 for(;;) {
1311 n1 = (long)tb1 & 3;
1312 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1313 if (n1 == 2)
1314 break;
1315 tb1 = tb1->jmp_next[n1];
1316 }
1317 /* we are now sure now that tb jumps to tb1 */
1318 tb_next = tb1;
1319
1320 /* remove tb from the jmp_first list */
1321 ptb = &tb_next->jmp_first;
1322 for(;;) {
1323 tb1 = *ptb;
1324 n1 = (long)tb1 & 3;
1325 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1326 if (n1 == n && tb1 == tb)
1327 break;
1328 ptb = &tb1->jmp_next[n1];
1329 }
1330 *ptb = tb->jmp_next[n];
1331 tb->jmp_next[n] = NULL;
3b46e624 1332
ea041c0e
FB
1333 /* suppress the jump to next tb in generated code */
1334 tb_reset_jump(tb, n);
1335
0124311e 1336 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1337 tb_reset_jump_recursive(tb_next);
1338 }
1339}
1340
1341static void tb_reset_jump_recursive(TranslationBlock *tb)
1342{
1343 tb_reset_jump_recursive2(tb, 0);
1344 tb_reset_jump_recursive2(tb, 1);
1345}
1346
1fddef4b 1347#if defined(TARGET_HAS_ICE)
94df27fd
PB
1348#if defined(CONFIG_USER_ONLY)
1349static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1350{
1351 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1352}
1353#else
d720b93d
FB
1354static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1355{
c227f099 1356 target_phys_addr_t addr;
9b3c35e0 1357 target_ulong pd;
c227f099 1358 ram_addr_t ram_addr;
c2f07f81 1359 PhysPageDesc *p;
d720b93d 1360
c2f07f81
PB
1361 addr = cpu_get_phys_page_debug(env, pc);
1362 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1363 if (!p) {
1364 pd = IO_MEM_UNASSIGNED;
1365 } else {
1366 pd = p->phys_offset;
1367 }
1368 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1369 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1370}
c27004ec 1371#endif
94df27fd 1372#endif /* TARGET_HAS_ICE */
d720b93d 1373
c527ee8f
PB
1374#if defined(CONFIG_USER_ONLY)
1375void cpu_watchpoint_remove_all(CPUState *env, int mask)
1376
1377{
1378}
1379
1380int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1381 int flags, CPUWatchpoint **watchpoint)
1382{
1383 return -ENOSYS;
1384}
1385#else
6658ffb8 1386/* Add a watchpoint. */
a1d1bb31
AL
1387int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1388 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1389{
b4051334 1390 target_ulong len_mask = ~(len - 1);
c0ce998e 1391 CPUWatchpoint *wp;
6658ffb8 1392
b4051334
AL
1393 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1394 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1395 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1396 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1397 return -EINVAL;
1398 }
a1d1bb31 1399 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1400
1401 wp->vaddr = addr;
b4051334 1402 wp->len_mask = len_mask;
a1d1bb31
AL
1403 wp->flags = flags;
1404
2dc9f411 1405 /* keep all GDB-injected watchpoints in front */
c0ce998e 1406 if (flags & BP_GDB)
72cf2d4f 1407 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
c0ce998e 1408 else
72cf2d4f 1409 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1410
6658ffb8 1411 tlb_flush_page(env, addr);
a1d1bb31
AL
1412
1413 if (watchpoint)
1414 *watchpoint = wp;
1415 return 0;
6658ffb8
PB
1416}
1417
a1d1bb31
AL
1418/* Remove a specific watchpoint. */
1419int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1420 int flags)
6658ffb8 1421{
b4051334 1422 target_ulong len_mask = ~(len - 1);
a1d1bb31 1423 CPUWatchpoint *wp;
6658ffb8 1424
72cf2d4f 1425 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1426 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1427 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1428 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1429 return 0;
1430 }
1431 }
a1d1bb31 1432 return -ENOENT;
6658ffb8
PB
1433}
1434
a1d1bb31
AL
1435/* Remove a specific watchpoint by reference. */
1436void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1437{
72cf2d4f 1438 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1439
a1d1bb31
AL
1440 tlb_flush_page(env, watchpoint->vaddr);
1441
1442 qemu_free(watchpoint);
1443}
1444
1445/* Remove all matching watchpoints. */
1446void cpu_watchpoint_remove_all(CPUState *env, int mask)
1447{
c0ce998e 1448 CPUWatchpoint *wp, *next;
a1d1bb31 1449
72cf2d4f 1450 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1451 if (wp->flags & mask)
1452 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1453 }
7d03f82f 1454}
c527ee8f 1455#endif
7d03f82f 1456
a1d1bb31
AL
1457/* Add a breakpoint. */
1458int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1459 CPUBreakpoint **breakpoint)
4c3a88a2 1460{
1fddef4b 1461#if defined(TARGET_HAS_ICE)
c0ce998e 1462 CPUBreakpoint *bp;
3b46e624 1463
a1d1bb31 1464 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1465
a1d1bb31
AL
1466 bp->pc = pc;
1467 bp->flags = flags;
1468
2dc9f411 1469 /* keep all GDB-injected breakpoints in front */
c0ce998e 1470 if (flags & BP_GDB)
72cf2d4f 1471 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
c0ce998e 1472 else
72cf2d4f 1473 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1474
d720b93d 1475 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1476
1477 if (breakpoint)
1478 *breakpoint = bp;
4c3a88a2
FB
1479 return 0;
1480#else
a1d1bb31 1481 return -ENOSYS;
4c3a88a2
FB
1482#endif
1483}
1484
a1d1bb31
AL
1485/* Remove a specific breakpoint. */
1486int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1487{
7d03f82f 1488#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1489 CPUBreakpoint *bp;
1490
72cf2d4f 1491 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1492 if (bp->pc == pc && bp->flags == flags) {
1493 cpu_breakpoint_remove_by_ref(env, bp);
1494 return 0;
1495 }
7d03f82f 1496 }
a1d1bb31
AL
1497 return -ENOENT;
1498#else
1499 return -ENOSYS;
7d03f82f
EI
1500#endif
1501}
1502
a1d1bb31
AL
1503/* Remove a specific breakpoint by reference. */
1504void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1505{
1fddef4b 1506#if defined(TARGET_HAS_ICE)
72cf2d4f 1507 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1508
a1d1bb31
AL
1509 breakpoint_invalidate(env, breakpoint->pc);
1510
1511 qemu_free(breakpoint);
1512#endif
1513}
1514
1515/* Remove all matching breakpoints. */
1516void cpu_breakpoint_remove_all(CPUState *env, int mask)
1517{
1518#if defined(TARGET_HAS_ICE)
c0ce998e 1519 CPUBreakpoint *bp, *next;
a1d1bb31 1520
72cf2d4f 1521 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1522 if (bp->flags & mask)
1523 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1524 }
4c3a88a2
FB
1525#endif
1526}
1527
c33a346e
FB
1528/* enable or disable single step mode. EXCP_DEBUG is returned by the
1529 CPU loop after each instruction */
1530void cpu_single_step(CPUState *env, int enabled)
1531{
1fddef4b 1532#if defined(TARGET_HAS_ICE)
c33a346e
FB
1533 if (env->singlestep_enabled != enabled) {
1534 env->singlestep_enabled = enabled;
e22a25c9
AL
1535 if (kvm_enabled())
1536 kvm_update_guest_debug(env, 0);
1537 else {
ccbb4d44 1538 /* must flush all the translated code to avoid inconsistencies */
e22a25c9
AL
1539 /* XXX: only flush what is necessary */
1540 tb_flush(env);
1541 }
c33a346e
FB
1542 }
1543#endif
1544}
1545
34865134
FB
1546/* enable or disable low levels log */
1547void cpu_set_log(int log_flags)
1548{
1549 loglevel = log_flags;
1550 if (loglevel && !logfile) {
11fcfab4 1551 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1552 if (!logfile) {
1553 perror(logfilename);
1554 _exit(1);
1555 }
9fa3e853
FB
1556#if !defined(CONFIG_SOFTMMU)
1557 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1558 {
b55266b5 1559 static char logfile_buf[4096];
9fa3e853
FB
1560 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1561 }
bf65f53f
FN
1562#elif !defined(_WIN32)
1563 /* Win32 doesn't support line-buffering and requires size >= 2 */
34865134 1564 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1565#endif
e735b91c
PB
1566 log_append = 1;
1567 }
1568 if (!loglevel && logfile) {
1569 fclose(logfile);
1570 logfile = NULL;
34865134
FB
1571 }
1572}
1573
1574void cpu_set_log_filename(const char *filename)
1575{
1576 logfilename = strdup(filename);
e735b91c
PB
1577 if (logfile) {
1578 fclose(logfile);
1579 logfile = NULL;
1580 }
1581 cpu_set_log(loglevel);
34865134 1582}
c33a346e 1583
3098dba0 1584static void cpu_unlink_tb(CPUState *env)
ea041c0e 1585{
3098dba0
AJ
1586 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1587 problem and hope the cpu will stop of its own accord. For userspace
1588 emulation this often isn't actually as bad as it sounds. Often
1589 signals are used primarily to interrupt blocking syscalls. */
ea041c0e 1590 TranslationBlock *tb;
c227f099 1591 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1592
cab1b4bd 1593 spin_lock(&interrupt_lock);
3098dba0
AJ
1594 tb = env->current_tb;
1595 /* if the cpu is currently executing code, we must unlink it and
1596 all the potentially executing TB */
f76cfe56 1597 if (tb) {
3098dba0
AJ
1598 env->current_tb = NULL;
1599 tb_reset_jump_recursive(tb);
be214e6c 1600 }
cab1b4bd 1601 spin_unlock(&interrupt_lock);
3098dba0
AJ
1602}
1603
1604/* mask must never be zero, except for A20 change call */
1605void cpu_interrupt(CPUState *env, int mask)
1606{
1607 int old_mask;
be214e6c 1608
2e70f6ef 1609 old_mask = env->interrupt_request;
68a79315 1610 env->interrupt_request |= mask;
3098dba0 1611
8edac960
AL
1612#ifndef CONFIG_USER_ONLY
1613 /*
1614 * If called from iothread context, wake the target cpu in
1615 * case its halted.
1616 */
1617 if (!qemu_cpu_self(env)) {
1618 qemu_cpu_kick(env);
1619 return;
1620 }
1621#endif
1622
2e70f6ef 1623 if (use_icount) {
266910c4 1624 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1625#ifndef CONFIG_USER_ONLY
2e70f6ef 1626 if (!can_do_io(env)
be214e6c 1627 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1628 cpu_abort(env, "Raised interrupt while not in I/O function");
1629 }
1630#endif
1631 } else {
3098dba0 1632 cpu_unlink_tb(env);
ea041c0e
FB
1633 }
1634}
1635
b54ad049
FB
1636void cpu_reset_interrupt(CPUState *env, int mask)
1637{
1638 env->interrupt_request &= ~mask;
1639}
1640
3098dba0
AJ
1641void cpu_exit(CPUState *env)
1642{
1643 env->exit_request = 1;
1644 cpu_unlink_tb(env);
1645}
1646
c7cd6a37 1647const CPULogItem cpu_log_items[] = {
5fafdf24 1648 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1649 "show generated host assembly code for each compiled TB" },
1650 { CPU_LOG_TB_IN_ASM, "in_asm",
1651 "show target assembly code for each compiled TB" },
5fafdf24 1652 { CPU_LOG_TB_OP, "op",
57fec1fe 1653 "show micro ops for each compiled TB" },
f193c797 1654 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1655 "show micro ops "
1656#ifdef TARGET_I386
1657 "before eflags optimization and "
f193c797 1658#endif
e01a1157 1659 "after liveness analysis" },
f193c797
FB
1660 { CPU_LOG_INT, "int",
1661 "show interrupts/exceptions in short format" },
1662 { CPU_LOG_EXEC, "exec",
1663 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1664 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1665 "show CPU state before block translation" },
f193c797
FB
1666#ifdef TARGET_I386
1667 { CPU_LOG_PCALL, "pcall",
1668 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1669 { CPU_LOG_RESET, "cpu_reset",
1670 "show CPU state before CPU resets" },
f193c797 1671#endif
8e3a9fd2 1672#ifdef DEBUG_IOPORT
fd872598
FB
1673 { CPU_LOG_IOPORT, "ioport",
1674 "show all i/o ports accesses" },
8e3a9fd2 1675#endif
f193c797
FB
1676 { 0, NULL, NULL },
1677};
1678
f6f3fbca
MT
1679#ifndef CONFIG_USER_ONLY
1680static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1681 = QLIST_HEAD_INITIALIZER(memory_client_list);
1682
1683static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1684 ram_addr_t size,
1685 ram_addr_t phys_offset)
1686{
1687 CPUPhysMemoryClient *client;
1688 QLIST_FOREACH(client, &memory_client_list, list) {
1689 client->set_memory(client, start_addr, size, phys_offset);
1690 }
1691}
1692
1693static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1694 target_phys_addr_t end)
1695{
1696 CPUPhysMemoryClient *client;
1697 QLIST_FOREACH(client, &memory_client_list, list) {
1698 int r = client->sync_dirty_bitmap(client, start, end);
1699 if (r < 0)
1700 return r;
1701 }
1702 return 0;
1703}
1704
1705static int cpu_notify_migration_log(int enable)
1706{
1707 CPUPhysMemoryClient *client;
1708 QLIST_FOREACH(client, &memory_client_list, list) {
1709 int r = client->migration_log(client, enable);
1710 if (r < 0)
1711 return r;
1712 }
1713 return 0;
1714}
1715
5cd2c5b6
RH
1716static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1717 int level, void **lp)
f6f3fbca 1718{
5cd2c5b6 1719 int i;
f6f3fbca 1720
5cd2c5b6
RH
1721 if (*lp == NULL) {
1722 return;
1723 }
1724 if (level == 0) {
1725 PhysPageDesc *pd = *lp;
1726 for (i = 0; i < L2_BITS; ++i) {
1727 if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1728 client->set_memory(client, pd[i].region_offset,
1729 TARGET_PAGE_SIZE, pd[i].phys_offset);
f6f3fbca 1730 }
5cd2c5b6
RH
1731 }
1732 } else {
1733 void **pp = *lp;
1734 for (i = 0; i < L2_BITS; ++i) {
1735 phys_page_for_each_1(client, level - 1, pp + i);
f6f3fbca
MT
1736 }
1737 }
1738}
1739
1740static void phys_page_for_each(CPUPhysMemoryClient *client)
1741{
5cd2c5b6
RH
1742 int i;
1743 for (i = 0; i < P_L1_SIZE; ++i) {
1744 phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
1745 l1_phys_map + 1);
f6f3fbca 1746 }
f6f3fbca
MT
1747}
1748
1749void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1750{
1751 QLIST_INSERT_HEAD(&memory_client_list, client, list);
1752 phys_page_for_each(client);
1753}
1754
1755void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1756{
1757 QLIST_REMOVE(client, list);
1758}
1759#endif
1760
f193c797
FB
1761static int cmp1(const char *s1, int n, const char *s2)
1762{
1763 if (strlen(s2) != n)
1764 return 0;
1765 return memcmp(s1, s2, n) == 0;
1766}
3b46e624 1767
f193c797
FB
1768/* takes a comma separated list of log masks. Return 0 if error. */
1769int cpu_str_to_log_mask(const char *str)
1770{
c7cd6a37 1771 const CPULogItem *item;
f193c797
FB
1772 int mask;
1773 const char *p, *p1;
1774
1775 p = str;
1776 mask = 0;
1777 for(;;) {
1778 p1 = strchr(p, ',');
1779 if (!p1)
1780 p1 = p + strlen(p);
8e3a9fd2
FB
 1781 if (cmp1(p, p1 - p, "all")) {
1782 for(item = cpu_log_items; item->mask != 0; item++) {
1783 mask |= item->mask;
1784 }
1785 } else {
f193c797
FB
1786 for(item = cpu_log_items; item->mask != 0; item++) {
1787 if (cmp1(p, p1 - p, item->name))
1788 goto found;
1789 }
1790 return 0;
8e3a9fd2 1791 }
f193c797
FB
1792 found:
1793 mask |= item->mask;
1794 if (*p1 != ',')
1795 break;
1796 p = p1 + 1;
1797 }
1798 return mask;
1799}
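/* Usage sketch (illustrative, not in the original source): turn a
   "-d"-style option string into a mask.  cpu_set_log() is assumed to
   be the companion setter defined elsewhere in this file. */
#if 0
int mask = cpu_str_to_log_mask("exec,cpu");
if (!mask) {
    fprintf(stderr, "unknown log item\n");
} else {
    cpu_set_log(mask);
}
#endif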
ea041c0e 1800
7501267e
FB
1801void cpu_abort(CPUState *env, const char *fmt, ...)
1802{
1803 va_list ap;
493ae1f0 1804 va_list ap2;
7501267e
FB
1805
1806 va_start(ap, fmt);
493ae1f0 1807 va_copy(ap2, ap);
7501267e
FB
1808 fprintf(stderr, "qemu: fatal: ");
1809 vfprintf(stderr, fmt, ap);
1810 fprintf(stderr, "\n");
1811#ifdef TARGET_I386
7fe48483
FB
1812 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1813#else
1814 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1815#endif
93fcfe39
AL
1816 if (qemu_log_enabled()) {
1817 qemu_log("qemu: fatal: ");
1818 qemu_log_vprintf(fmt, ap2);
1819 qemu_log("\n");
f9373291 1820#ifdef TARGET_I386
93fcfe39 1821 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1822#else
93fcfe39 1823 log_cpu_state(env, 0);
f9373291 1824#endif
31b1a7b4 1825 qemu_log_flush();
93fcfe39 1826 qemu_log_close();
924edcae 1827 }
493ae1f0 1828 va_end(ap2);
f9373291 1829 va_end(ap);
fd052bf6
RV
1830#if defined(CONFIG_USER_ONLY)
1831 {
1832 struct sigaction act;
1833 sigfillset(&act.sa_mask);
1834 act.sa_handler = SIG_DFL;
1835 sigaction(SIGABRT, &act, NULL);
1836 }
1837#endif
7501267e
FB
1838 abort();
1839}
1840
c5be9f08
TS
1841CPUState *cpu_copy(CPUState *env)
1842{
01ba9816 1843 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1844 CPUState *next_cpu = new_env->next_cpu;
1845 int cpu_index = new_env->cpu_index;
5a38f081
AL
1846#if defined(TARGET_HAS_ICE)
1847 CPUBreakpoint *bp;
1848 CPUWatchpoint *wp;
1849#endif
1850
c5be9f08 1851 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1852
1853 /* Preserve chaining and index. */
c5be9f08
TS
1854 new_env->next_cpu = next_cpu;
1855 new_env->cpu_index = cpu_index;
5a38f081
AL
1856
1857 /* Clone all break/watchpoints.
1858 Note: Once we support ptrace with hw-debug register access, make sure
1859 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1860 QTAILQ_INIT(&env->breakpoints);
1861 QTAILQ_INIT(&env->watchpoints);
5a38f081 1862#if defined(TARGET_HAS_ICE)
72cf2d4f 1863 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1864 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1865 }
72cf2d4f 1866 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1867 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1868 wp->flags, NULL);
1869 }
1870#endif
1871
c5be9f08
TS
1872 return new_env;
1873}
1874
0124311e
FB
1875#if !defined(CONFIG_USER_ONLY)
1876
5c751e99
EI
1877static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1878{
1879 unsigned int i;
1880
1881 /* Discard jump cache entries for any tb which might potentially
1882 overlap the flushed page. */
1883 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1884 memset (&env->tb_jmp_cache[i], 0,
1885 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1886
1887 i = tb_jmp_cache_hash_page(addr);
1888 memset (&env->tb_jmp_cache[i], 0,
1889 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1890}
1891
08738984
IK
1892static CPUTLBEntry s_cputlb_empty_entry = {
1893 .addr_read = -1,
1894 .addr_write = -1,
1895 .addr_code = -1,
1896 .addend = -1,
1897};
1898
ee8b7021
FB
1899/* NOTE: if flush_global is true, also flush global entries (not
1900 implemented yet) */
1901void tlb_flush(CPUState *env, int flush_global)
33417e70 1902{
33417e70 1903 int i;
0124311e 1904
9fa3e853
FB
1905#if defined(DEBUG_TLB)
1906 printf("tlb_flush:\n");
1907#endif
0124311e
FB
1908 /* must reset current TB so that interrupts cannot modify the
1909 links while we are modifying them */
1910 env->current_tb = NULL;
1911
33417e70 1912 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1913 int mmu_idx;
1914 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1915 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1916 }
33417e70 1917 }
9fa3e853 1918
8a40a180 1919 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1920
e3db7226 1921 tlb_flush_count++;
33417e70
FB
1922}
1923
274da6b2 1924static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1925{
5fafdf24 1926 if (addr == (tlb_entry->addr_read &
84b7b8e7 1927 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1928 addr == (tlb_entry->addr_write &
84b7b8e7 1929 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1930 addr == (tlb_entry->addr_code &
84b7b8e7 1931 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1932 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1933 }
61382a50
FB
1934}
1935
2e12669a 1936void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1937{
8a40a180 1938 int i;
cfde4bd9 1939 int mmu_idx;
0124311e 1940
9fa3e853 1941#if defined(DEBUG_TLB)
108c49b8 1942 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1943#endif
0124311e
FB
1944 /* must reset current TB so that interrupts cannot modify the
1945 links while we are modifying them */
1946 env->current_tb = NULL;
61382a50
FB
1947
1948 addr &= TARGET_PAGE_MASK;
1949 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1950 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1951 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1952
5c751e99 1953 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1954}
1955
9fa3e853
FB
1956/* update the TLBs so that writes to code in the virtual page 'addr'
1957 can be detected */
c227f099 1958static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1959{
5fafdf24 1960 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1961 ram_addr + TARGET_PAGE_SIZE,
1962 CODE_DIRTY_FLAG);
9fa3e853
FB
1963}
1964
9fa3e853 1965/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1966 tested for self modifying code */
c227f099 1967static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1968 target_ulong vaddr)
9fa3e853 1969{
3a7d929e 1970 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1971}
1972
5fafdf24 1973static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1974 unsigned long start, unsigned long length)
1975{
1976 unsigned long addr;
84b7b8e7
FB
1977 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1978 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1979 if ((addr - start) < length) {
0f459d16 1980 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1981 }
1982 }
1983}
1984
5579c7f3 1985/* Note: start and end must be within the same ram block. */
c227f099 1986void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1987 int dirty_flags)
1ccde1cb
FB
1988{
1989 CPUState *env;
4f2ac237 1990 unsigned long length, start1;
0a962c02
FB
1991 int i, mask, len;
1992 uint8_t *p;
1ccde1cb
FB
1993
1994 start &= TARGET_PAGE_MASK;
1995 end = TARGET_PAGE_ALIGN(end);
1996
1997 length = end - start;
1998 if (length == 0)
1999 return;
0a962c02 2000 len = length >> TARGET_PAGE_BITS;
f23db169
FB
2001 mask = ~dirty_flags;
2002 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
2003 for(i = 0; i < len; i++)
2004 p[i] &= mask;
2005
1ccde1cb
FB
2006 /* we modify the TLB cache so that the dirty bit will be set again
2007 when accessing the range */
5579c7f3
PB
2008 start1 = (unsigned long)qemu_get_ram_ptr(start);
 2009 /* Check that we don't span multiple blocks - this breaks the
2010 address comparisons below. */
2011 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
2012 != (end - 1) - start) {
2013 abort();
2014 }
2015
6a00d601 2016 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
2017 int mmu_idx;
2018 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2019 for(i = 0; i < CPU_TLB_SIZE; i++)
2020 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2021 start1, length);
2022 }
6a00d601 2023 }
1ccde1cb
FB
2024}
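/* Worked example (illustrative): each byte of phys_ram_dirty holds all
   dirty flags for one page.  With the flag values from cpu-all.h
   (e.g. VGA_DIRTY_FLAG == 0x01), resetting VGA dirtiness on a fully
   dirty page keeps the remaining flags intact:
       p[i] & ~0x01 == 0xff & 0xfe == 0xfe
   so CODE_DIRTY_FLAG and friends survive the reset. */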
2025
74576198
AL
2026int cpu_physical_memory_set_dirty_tracking(int enable)
2027{
f6f3fbca 2028 int ret = 0;
74576198 2029 in_migration = enable;
f6f3fbca
MT
2030 ret = cpu_notify_migration_log(!!enable);
2031 return ret;
74576198
AL
2032}
2033
2034int cpu_physical_memory_get_dirty_tracking(void)
2035{
2036 return in_migration;
2037}
2038
c227f099
AL
2039int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2040 target_phys_addr_t end_addr)
2bec46dc 2041{
7b8f3b78 2042 int ret;
151f7749 2043
f6f3fbca 2044 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2045 return ret;
2bec46dc
AL
2046}
2047
3a7d929e
FB
2048static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2049{
c227f099 2050 ram_addr_t ram_addr;
5579c7f3 2051 void *p;
3a7d929e 2052
84b7b8e7 2053 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2054 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2055 + tlb_entry->addend);
2056 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 2057 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2058 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2059 }
2060 }
2061}
2062
2063/* update the TLB according to the current state of the dirty bits */
2064void cpu_tlb_update_dirty(CPUState *env)
2065{
2066 int i;
cfde4bd9
IY
2067 int mmu_idx;
2068 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2069 for(i = 0; i < CPU_TLB_SIZE; i++)
2070 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2071 }
3a7d929e
FB
2072}
2073
0f459d16 2074static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2075{
0f459d16
PB
2076 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2077 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2078}
2079
0f459d16
PB
2080/* update the TLB corresponding to virtual page vaddr
2081 so that it is no longer dirty */
2082static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2083{
1ccde1cb 2084 int i;
cfde4bd9 2085 int mmu_idx;
1ccde1cb 2086
0f459d16 2087 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2088 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2089 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2090 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2091}
2092
59817ccb
FB
2093/* add a new TLB entry. At most one entry for a given virtual address
2094 is permitted. Return 0 if OK or 2 if the page could not be mapped
2095 (can only happen in non SOFTMMU mode for I/O pages or pages
2096 conflicting with the host address space). */
5fafdf24 2097int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
c227f099 2098 target_phys_addr_t paddr, int prot,
6ebbf390 2099 int mmu_idx, int is_softmmu)
9fa3e853 2100{
92e873b9 2101 PhysPageDesc *p;
4f2ac237 2102 unsigned long pd;
9fa3e853 2103 unsigned int index;
4f2ac237 2104 target_ulong address;
0f459d16 2105 target_ulong code_address;
c227f099 2106 target_phys_addr_t addend;
9fa3e853 2107 int ret;
84b7b8e7 2108 CPUTLBEntry *te;
a1d1bb31 2109 CPUWatchpoint *wp;
c227f099 2110 target_phys_addr_t iotlb;
9fa3e853 2111
92e873b9 2112 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2113 if (!p) {
2114 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2115 } else {
2116 pd = p->phys_offset;
9fa3e853
FB
2117 }
2118#if defined(DEBUG_TLB)
6ebbf390
JM
2119 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2120 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2121#endif
2122
2123 ret = 0;
0f459d16
PB
2124 address = vaddr;
2125 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2126 /* IO memory case (romd handled later) */
2127 address |= TLB_MMIO;
2128 }
5579c7f3 2129 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2130 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2131 /* Normal RAM. */
2132 iotlb = pd & TARGET_PAGE_MASK;
2133 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2134 iotlb |= IO_MEM_NOTDIRTY;
2135 else
2136 iotlb |= IO_MEM_ROM;
2137 } else {
ccbb4d44 2138 /* IO handlers are currently passed a physical address.
0f459d16
PB
2139 It would be nice to pass an offset from the base address
2140 of that region. This would avoid having to special case RAM,
2141 and avoid full address decoding in every device.
2142 We can't use the high bits of pd for this because
2143 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2144 iotlb = (pd & ~TARGET_PAGE_MASK);
2145 if (p) {
8da3ff18
PB
2146 iotlb += p->region_offset;
2147 } else {
2148 iotlb += paddr;
2149 }
0f459d16
PB
2150 }
2151
2152 code_address = address;
2153 /* Make accesses to pages with watchpoints go via the
2154 watchpoint trap routines. */
72cf2d4f 2155 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2156 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2157 iotlb = io_mem_watch + paddr;
2158 /* TODO: The memory case can be optimized by not trapping
2159 reads of pages with a write breakpoint. */
2160 address |= TLB_MMIO;
6658ffb8 2161 }
0f459d16 2162 }
d79acba4 2163
0f459d16
PB
2164 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2165 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2166 te = &env->tlb_table[mmu_idx][index];
2167 te->addend = addend - vaddr;
2168 if (prot & PAGE_READ) {
2169 te->addr_read = address;
2170 } else {
2171 te->addr_read = -1;
2172 }
5c751e99 2173
0f459d16
PB
2174 if (prot & PAGE_EXEC) {
2175 te->addr_code = code_address;
2176 } else {
2177 te->addr_code = -1;
2178 }
2179 if (prot & PAGE_WRITE) {
2180 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2181 (pd & IO_MEM_ROMD)) {
2182 /* Write access calls the I/O callback. */
2183 te->addr_write = address | TLB_MMIO;
2184 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2185 !cpu_physical_memory_is_dirty(pd)) {
2186 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2187 } else {
0f459d16 2188 te->addr_write = address;
9fa3e853 2189 }
0f459d16
PB
2190 } else {
2191 te->addr_write = -1;
9fa3e853 2192 }
9fa3e853
FB
2193 return ret;
2194}
2195
0124311e
FB
2196#else
2197
ee8b7021 2198void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2199{
2200}
2201
2e12669a 2202void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2203{
2204}
2205
edf8e2af
MW
2206/*
2207 * Walks guest process memory "regions" one by one
2208 * and calls callback function 'fn' for each region.
2209 */
5cd2c5b6
RH
2210
2211struct walk_memory_regions_data
2212{
2213 walk_memory_regions_fn fn;
2214 void *priv;
2215 unsigned long start;
2216 int prot;
2217};
2218
2219static int walk_memory_regions_end(struct walk_memory_regions_data *data,
b480d9b7 2220 abi_ulong end, int new_prot)
5cd2c5b6
RH
2221{
2222 if (data->start != -1ul) {
2223 int rc = data->fn(data->priv, data->start, end, data->prot);
2224 if (rc != 0) {
2225 return rc;
2226 }
2227 }
2228
2229 data->start = (new_prot ? end : -1ul);
2230 data->prot = new_prot;
2231
2232 return 0;
2233}
2234
2235static int walk_memory_regions_1(struct walk_memory_regions_data *data,
b480d9b7 2236 abi_ulong base, int level, void **lp)
5cd2c5b6 2237{
b480d9b7 2238 abi_ulong pa;
5cd2c5b6
RH
2239 int i, rc;
2240
2241 if (*lp == NULL) {
2242 return walk_memory_regions_end(data, base, 0);
2243 }
2244
2245 if (level == 0) {
2246 PageDesc *pd = *lp;
2247 for (i = 0; i < L2_BITS; ++i) {
2248 int prot = pd[i].flags;
2249
2250 pa = base | (i << TARGET_PAGE_BITS);
2251 if (prot != data->prot) {
2252 rc = walk_memory_regions_end(data, pa, prot);
2253 if (rc != 0) {
2254 return rc;
9fa3e853 2255 }
9fa3e853 2256 }
5cd2c5b6
RH
2257 }
2258 } else {
2259 void **pp = *lp;
2260 for (i = 0; i < L2_BITS; ++i) {
b480d9b7
PB
2261 pa = base | ((abi_ulong)i <<
2262 (TARGET_PAGE_BITS + L2_BITS * level));
5cd2c5b6
RH
2263 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2264 if (rc != 0) {
2265 return rc;
2266 }
2267 }
2268 }
2269
2270 return 0;
2271}
2272
2273int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2274{
2275 struct walk_memory_regions_data data;
2276 unsigned long i;
2277
2278 data.fn = fn;
2279 data.priv = priv;
2280 data.start = -1ul;
2281 data.prot = 0;
2282
2283 for (i = 0; i < V_L1_SIZE; i++) {
b480d9b7 2284 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
5cd2c5b6
RH
2285 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2286 if (rc != 0) {
2287 return rc;
9fa3e853 2288 }
33417e70 2289 }
5cd2c5b6
RH
2290
2291 return walk_memory_regions_end(&data, 0, 0);
edf8e2af
MW
2292}
2293
b480d9b7
PB
2294static int dump_region(void *priv, abi_ulong start,
2295 abi_ulong end, unsigned long prot)
edf8e2af
MW
2296{
2297 FILE *f = (FILE *)priv;
2298
b480d9b7
PB
2299 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2300 " "TARGET_ABI_FMT_lx" %c%c%c\n",
edf8e2af
MW
2301 start, end, end - start,
2302 ((prot & PAGE_READ) ? 'r' : '-'),
2303 ((prot & PAGE_WRITE) ? 'w' : '-'),
2304 ((prot & PAGE_EXEC) ? 'x' : '-'));
2305
2306 return (0);
2307}
2308
2309/* dump memory mappings */
2310void page_dump(FILE *f)
2311{
2312 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2313 "start", "end", "size", "prot");
2314 walk_memory_regions(f, dump_region);
33417e70
FB
2315}
2316
53a5960a 2317int page_get_flags(target_ulong address)
33417e70 2318{
9fa3e853
FB
2319 PageDesc *p;
2320
2321 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2322 if (!p)
9fa3e853
FB
2323 return 0;
2324 return p->flags;
2325}
2326
376a7909
RH
2327/* Modify the flags of a page and invalidate the code if necessary.
2328 The flag PAGE_WRITE_ORG is positioned automatically depending
2329 on PAGE_WRITE. The mmap_lock should already be held. */
53a5960a 2330void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853 2331{
376a7909
RH
2332 target_ulong addr, len;
2333
2334 /* This function should never be called with addresses outside the
2335 guest address space. If this assert fires, it probably indicates
2336 a missing call to h2g_valid. */
b480d9b7
PB
2337#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2338 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
376a7909
RH
2339#endif
2340 assert(start < end);
9fa3e853
FB
2341
2342 start = start & TARGET_PAGE_MASK;
2343 end = TARGET_PAGE_ALIGN(end);
376a7909
RH
2344
2345 if (flags & PAGE_WRITE) {
9fa3e853 2346 flags |= PAGE_WRITE_ORG;
376a7909
RH
2347 }
2348
2349 for (addr = start, len = end - start;
2350 len != 0;
2351 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2352 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2353
2354 /* If the write protection bit is set, then we invalidate
2355 the code inside. */
5fafdf24 2356 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2357 (flags & PAGE_WRITE) &&
2358 p->first_tb) {
d720b93d 2359 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2360 }
2361 p->flags = flags;
2362 }
33417e70
FB
2363}
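/* Usage sketch (illustrative): user-mode mmap emulation marks a fresh
   mapping valid with the guest protection bits; the real caller lives
   in linux-user/mmap.c, and the names here are schematic. */
#if 0
page_set_flags(start, start + len, prot | PAGE_VALID);
#endif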
2364
3d97b40b
TS
2365int page_check_range(target_ulong start, target_ulong len, int flags)
2366{
2367 PageDesc *p;
2368 target_ulong end;
2369 target_ulong addr;
2370
376a7909
RH
2371 /* This function should never be called with addresses outside the
2372 guest address space. If this assert fires, it probably indicates
2373 a missing call to h2g_valid. */
2374#if HOST_LONG_BITS > L1_MAP_ADDR_SPACE_BITS
2375 assert(start < (1ul << L1_MAP_ADDR_SPACE_BITS));
2376#endif
2377
2378 if (start + len - 1 < start) {
2379 /* We've wrapped around. */
55f280c9 2380 return -1;
376a7909 2381 }
55f280c9 2382
3d97b40b
TS
 2383 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2384 start = start & TARGET_PAGE_MASK;
2385
376a7909
RH
2386 for (addr = start, len = end - start;
2387 len != 0;
2388 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
3d97b40b
TS
2389 p = page_find(addr >> TARGET_PAGE_BITS);
 2390 if (!p)
 2391 return -1;
 2392 if (!(p->flags & PAGE_VALID))
2393 return -1;
2394
dae3270c 2395 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2396 return -1;
dae3270c
FB
2397 if (flags & PAGE_WRITE) {
2398 if (!(p->flags & PAGE_WRITE_ORG))
2399 return -1;
2400 /* unprotect the page if it was put read-only because it
2401 contains translated code */
2402 if (!(p->flags & PAGE_WRITE)) {
2403 if (!page_unprotect(addr, 0, NULL))
2404 return -1;
2405 }
2406 return 0;
2407 }
3d97b40b
TS
2408 }
2409 return 0;
2410}
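/* Usage sketch (illustrative): validate a guest buffer before reading
   it, as a syscall emulation layer would (the error value is
   schematic). */
#if 0
if (page_check_range(guest_addr, size, PAGE_READ) < 0) {
    return -EFAULT;
}
#endif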
2411
9fa3e853 2412/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2413 page. Return TRUE if the fault was successfully handled. */
53a5960a 2414int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2415{
2416 unsigned int page_index, prot, pindex;
2417 PageDesc *p, *p1;
53a5960a 2418 target_ulong host_start, host_end, addr;
9fa3e853 2419
c8a706fe
PB
2420 /* Technically this isn't safe inside a signal handler. However we
2421 know this only ever happens in a synchronous SEGV handler, so in
2422 practice it seems to be ok. */
2423 mmap_lock();
2424
83fb7adf 2425 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2426 page_index = host_start >> TARGET_PAGE_BITS;
2427 p1 = page_find(page_index);
c8a706fe
PB
2428 if (!p1) {
2429 mmap_unlock();
9fa3e853 2430 return 0;
c8a706fe 2431 }
83fb7adf 2432 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2433 p = p1;
2434 prot = 0;
2435 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2436 prot |= p->flags;
2437 p++;
2438 }
2439 /* if the page was really writable, then we change its
2440 protection back to writable */
2441 if (prot & PAGE_WRITE_ORG) {
2442 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2443 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2444 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2445 (prot & PAGE_BITS) | PAGE_WRITE);
2446 p1[pindex].flags |= PAGE_WRITE;
2447 /* and since the content will be modified, we must invalidate
2448 the corresponding translated code. */
d720b93d 2449 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2450#ifdef DEBUG_TB_CHECK
2451 tb_invalidate_check(address);
2452#endif
c8a706fe 2453 mmap_unlock();
9fa3e853
FB
2454 return 1;
2455 }
2456 }
c8a706fe 2457 mmap_unlock();
9fa3e853
FB
2458 return 0;
2459}
2460
6a00d601
FB
2461static inline void tlb_set_dirty(CPUState *env,
2462 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2463{
2464}
9fa3e853
FB
2465#endif /* defined(CONFIG_USER_ONLY) */
2466
e2eef170 2467#if !defined(CONFIG_USER_ONLY)
8da3ff18 2468
c04b2b78
PB
2469#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2470typedef struct subpage_t {
2471 target_phys_addr_t base;
2472 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2473 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2474 void *opaque[TARGET_PAGE_SIZE][2][4];
2475 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2476} subpage_t;
2477
c227f099
AL
2478static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2479 ram_addr_t memory, ram_addr_t region_offset);
2480static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2481 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2482#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2483 need_subpage) \
2484 do { \
2485 if (addr > start_addr) \
2486 start_addr2 = 0; \
2487 else { \
2488 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2489 if (start_addr2 > 0) \
2490 need_subpage = 1; \
2491 } \
2492 \
49e9fba2 2493 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2494 end_addr2 = TARGET_PAGE_SIZE - 1; \
2495 else { \
2496 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2497 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2498 need_subpage = 1; \
2499 } \
2500 } while (0)
2501
8f2498f9
MT
2502/* register physical memory.
2503 For RAM, 'size' must be a multiple of the target page size.
2504 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2505 io memory page. The address used when calling the IO function is
2506 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2507 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2508 before calculating this offset. This should not be a problem unless
2509 the low bits of start_addr and region_offset differ. */
c227f099
AL
2510void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2511 ram_addr_t size,
2512 ram_addr_t phys_offset,
2513 ram_addr_t region_offset)
33417e70 2514{
c227f099 2515 target_phys_addr_t addr, end_addr;
92e873b9 2516 PhysPageDesc *p;
9d42037b 2517 CPUState *env;
c227f099 2518 ram_addr_t orig_size = size;
db7b5426 2519 void *subpage;
33417e70 2520
f6f3fbca
MT
2521 cpu_notify_set_memory(start_addr, size, phys_offset);
2522
67c4d23c
PB
2523 if (phys_offset == IO_MEM_UNASSIGNED) {
2524 region_offset = start_addr;
2525 }
8da3ff18 2526 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2527 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2528 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2529 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2530 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2531 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2532 ram_addr_t orig_memory = p->phys_offset;
2533 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2534 int need_subpage = 0;
2535
2536 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2537 need_subpage);
4254fab8 2538 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2539 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2540 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2541 &p->phys_offset, orig_memory,
2542 p->region_offset);
db7b5426
BS
2543 } else {
2544 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2545 >> IO_MEM_SHIFT];
2546 }
8da3ff18
PB
2547 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2548 region_offset);
2549 p->region_offset = 0;
db7b5426
BS
2550 } else {
2551 p->phys_offset = phys_offset;
2552 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2553 (phys_offset & IO_MEM_ROMD))
2554 phys_offset += TARGET_PAGE_SIZE;
2555 }
2556 } else {
2557 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2558 p->phys_offset = phys_offset;
8da3ff18 2559 p->region_offset = region_offset;
db7b5426 2560 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2561 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2562 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2563 } else {
c227f099 2564 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2565 int need_subpage = 0;
2566
2567 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2568 end_addr2, need_subpage);
2569
4254fab8 2570 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2571 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2572 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2573 addr & TARGET_PAGE_MASK);
db7b5426 2574 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2575 phys_offset, region_offset);
2576 p->region_offset = 0;
db7b5426
BS
2577 }
2578 }
2579 }
8da3ff18 2580 region_offset += TARGET_PAGE_SIZE;
33417e70 2581 }
3b46e624 2582
9d42037b
FB
2583 /* since each CPU stores ram addresses in its TLB cache, we must
2584 reset the modified entries */
2585 /* XXX: slow ! */
2586 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2587 tlb_flush(env, 1);
2588 }
33417e70
FB
2589}
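/* Usage sketch (illustrative): board code typically maps RAM through
   the cpu_register_physical_memory() wrapper from cpu-common.h, which
   forwards here with region_offset == 0.  The base address and size
   below are hypothetical. */
#if 0
ram_addr_t ram_offset = qemu_ram_alloc(0x20000);            /* 128 KiB */
cpu_register_physical_memory(0x10000000, 0x20000,
                             ram_offset | IO_MEM_RAM);
#endif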
2590
ba863458 2591/* XXX: temporary until new memory mapping API */
c227f099 2592ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2593{
2594 PhysPageDesc *p;
2595
2596 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2597 if (!p)
2598 return IO_MEM_UNASSIGNED;
2599 return p->phys_offset;
2600}
2601
c227f099 2602void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2603{
2604 if (kvm_enabled())
2605 kvm_coalesce_mmio_region(addr, size);
2606}
2607
c227f099 2608void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2609{
2610 if (kvm_enabled())
2611 kvm_uncoalesce_mmio_region(addr, size);
2612}
2613
62a2744c
SY
2614void qemu_flush_coalesced_mmio_buffer(void)
2615{
2616 if (kvm_enabled())
2617 kvm_flush_coalesced_mmio_buffer();
2618}
2619
c902760f
MT
2620#if defined(__linux__) && !defined(TARGET_S390X)
2621
2622#include <sys/vfs.h>
2623
2624#define HUGETLBFS_MAGIC 0x958458f6
2625
2626static long gethugepagesize(const char *path)
2627{
2628 struct statfs fs;
2629 int ret;
2630
2631 do {
2632 ret = statfs(path, &fs);
2633 } while (ret != 0 && errno == EINTR);
2634
2635 if (ret != 0) {
2636 perror("statfs");
2637 return 0;
2638 }
2639
2640 if (fs.f_type != HUGETLBFS_MAGIC)
2641 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2642
2643 return fs.f_bsize;
2644}
2645
2646static void *file_ram_alloc(ram_addr_t memory, const char *path)
2647{
2648 char *filename;
2649 void *area;
2650 int fd;
2651#ifdef MAP_POPULATE
2652 int flags;
2653#endif
2654 unsigned long hpagesize;
2655
2656 hpagesize = gethugepagesize(path);
2657 if (!hpagesize) {
2658 return NULL;
2659 }
2660
2661 if (memory < hpagesize) {
2662 return NULL;
2663 }
2664
2665 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2666 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2667 return NULL;
2668 }
2669
2670 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2671 return NULL;
2672 }
2673
2674 fd = mkstemp(filename);
2675 if (fd < 0) {
2676 perror("mkstemp");
2677 free(filename);
2678 return NULL;
2679 }
2680 unlink(filename);
2681 free(filename);
2682
2683 memory = (memory+hpagesize-1) & ~(hpagesize-1);
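    /* e.g. with 2 MiB huge pages a 3 MiB request rounds up:
       (0x300000 + 0x1fffff) & ~0x1fffff == 0x400000 (illustrative). */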
2684
2685 /*
2686 * ftruncate is not supported by hugetlbfs in older
2687 * hosts, so don't bother bailing out on errors.
2688 * If anything goes wrong with it under other filesystems,
2689 * mmap will fail.
2690 */
2691 if (ftruncate(fd, memory))
2692 perror("ftruncate");
2693
2694#ifdef MAP_POPULATE
 2695 /* NB: MAP_POPULATE won't exhaustively allocate all phys pages when
2696 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2697 * to sidestep this quirk.
2698 */
2699 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2700 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2701#else
2702 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2703#endif
2704 if (area == MAP_FAILED) {
2705 perror("file_ram_alloc: can't mmap RAM pages");
2706 close(fd);
2707 return (NULL);
2708 }
2709 return area;
2710}
2711#endif
2712
c227f099 2713ram_addr_t qemu_ram_alloc(ram_addr_t size)
94a6b54f
PB
2714{
2715 RAMBlock *new_block;
2716
94a6b54f
PB
2717 size = TARGET_PAGE_ALIGN(size);
2718 new_block = qemu_malloc(sizeof(*new_block));
2719
c902760f
MT
2720 if (mem_path) {
2721#if defined (__linux__) && !defined(TARGET_S390X)
2722 new_block->host = file_ram_alloc(size, mem_path);
2723 if (!new_block->host)
2724 exit(1);
2725#else
2726 fprintf(stderr, "-mem-path option unsupported\n");
2727 exit(1);
2728#endif
2729 } else {
6b02494d 2730#if defined(TARGET_S390X) && defined(CONFIG_KVM)
c902760f
MT
2731 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2732 new_block->host = mmap((void*)0x1000000, size,
2733 PROT_EXEC|PROT_READ|PROT_WRITE,
2734 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2735#else
c902760f 2736 new_block->host = qemu_vmalloc(size);
6b02494d 2737#endif
ccb167e9 2738#ifdef MADV_MERGEABLE
c902760f 2739 madvise(new_block->host, size, MADV_MERGEABLE);
ccb167e9 2740#endif
c902760f 2741 }
94a6b54f
PB
2742 new_block->offset = last_ram_offset;
2743 new_block->length = size;
2744
2745 new_block->next = ram_blocks;
2746 ram_blocks = new_block;
2747
2748 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2749 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2750 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2751 0xff, size >> TARGET_PAGE_BITS);
2752
2753 last_ram_offset += size;
2754
6f0437e8
JK
2755 if (kvm_enabled())
2756 kvm_setup_guest_memory(new_block->host, size);
2757
94a6b54f
PB
2758 return new_block->offset;
2759}
e9a1ab19 2760
c227f099 2761void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2762{
94a6b54f 2763 /* TODO: implement this. */
e9a1ab19
FB
2764}
2765
dc828ca1 2766/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2767 With the exception of the softmmu code in this file, this should
2768 only be used for local memory (e.g. video ram) that the device owns,
2769 and knows it isn't going to access beyond the end of the block.
2770
2771 It should not be used for general purpose DMA.
2772 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2773 */
c227f099 2774void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2775{
94a6b54f
PB
2776 RAMBlock *prev;
2777 RAMBlock **prevp;
2778 RAMBlock *block;
2779
94a6b54f
PB
2780 prev = NULL;
2781 prevp = &ram_blocks;
2782 block = ram_blocks;
2783 while (block && (block->offset > addr
2784 || block->offset + block->length <= addr)) {
2785 if (prev)
2786 prevp = &prev->next;
2787 prev = block;
2788 block = block->next;
2789 }
2790 if (!block) {
2791 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2792 abort();
2793 }
 2794 /* Move this entry to the start of the list. */
2795 if (prev) {
2796 prev->next = block->next;
2797 block->next = *prevp;
2798 *prevp = block;
2799 }
2800 return block->host + (addr - block->offset);
dc828ca1
PB
2801}
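/* Usage sketch (illustrative): a display device that owns its RAM
   block may keep a host pointer for local access, per the comment
   above (vram_size is hypothetical). */
#if 0
ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
uint8_t *vram = qemu_get_ram_ptr(vram_offset);
memset(vram, 0, vram_size);
#endif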
2802
5579c7f3
PB
2803/* Some of the softmmu routines need to translate from a host pointer
2804 (typically a TLB entry) back to a ram offset. */
c227f099 2805ram_addr_t qemu_ram_addr_from_host(void *ptr)
5579c7f3 2806{
94a6b54f 2807 RAMBlock *prev;
94a6b54f
PB
2808 RAMBlock *block;
2809 uint8_t *host = ptr;
2810
94a6b54f 2811 prev = NULL;
94a6b54f
PB
2812 block = ram_blocks;
2813 while (block && (block->host > host
2814 || block->host + block->length <= host)) {
94a6b54f
PB
2815 prev = block;
2816 block = block->next;
2817 }
2818 if (!block) {
2819 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2820 abort();
2821 }
2822 return block->offset + (host - block->host);
5579c7f3
PB
2823}
2824
c227f099 2825static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2826{
67d3b957 2827#ifdef DEBUG_UNASSIGNED
ab3d1727 2828 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2829#endif
faed1c2a 2830#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2831 do_unassigned_access(addr, 0, 0, 0, 1);
2832#endif
2833 return 0;
2834}
2835
c227f099 2836static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2837{
2838#ifdef DEBUG_UNASSIGNED
2839 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2840#endif
faed1c2a 2841#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2842 do_unassigned_access(addr, 0, 0, 0, 2);
2843#endif
2844 return 0;
2845}
2846
c227f099 2847static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2848{
2849#ifdef DEBUG_UNASSIGNED
2850 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2851#endif
faed1c2a 2852#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2853 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2854#endif
33417e70
FB
2855 return 0;
2856}
2857
c227f099 2858static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2859{
67d3b957 2860#ifdef DEBUG_UNASSIGNED
ab3d1727 2861 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2862#endif
faed1c2a 2863#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2864 do_unassigned_access(addr, 1, 0, 0, 1);
2865#endif
2866}
2867
c227f099 2868static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2869{
2870#ifdef DEBUG_UNASSIGNED
2871 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2872#endif
faed1c2a 2873#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2874 do_unassigned_access(addr, 1, 0, 0, 2);
2875#endif
2876}
2877
c227f099 2878static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2879{
2880#ifdef DEBUG_UNASSIGNED
2881 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2882#endif
faed1c2a 2883#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2884 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2885#endif
33417e70
FB
2886}
2887
d60efc6b 2888static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 2889 unassigned_mem_readb,
e18231a3
BS
2890 unassigned_mem_readw,
2891 unassigned_mem_readl,
33417e70
FB
2892};
2893
d60efc6b 2894static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 2895 unassigned_mem_writeb,
e18231a3
BS
2896 unassigned_mem_writew,
2897 unassigned_mem_writel,
33417e70
FB
2898};
2899
c227f099 2900static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2901 uint32_t val)
9fa3e853 2902{
3a7d929e 2903 int dirty_flags;
3a7d929e
FB
2904 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2905 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2906#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2907 tb_invalidate_phys_page_fast(ram_addr, 1);
2908 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2909#endif
3a7d929e 2910 }
5579c7f3 2911 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2912 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2913 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2914 /* we remove the notdirty callback only if the code has been
2915 flushed */
2916 if (dirty_flags == 0xff)
2e70f6ef 2917 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2918}
2919
c227f099 2920static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2921 uint32_t val)
9fa3e853 2922{
3a7d929e 2923 int dirty_flags;
3a7d929e
FB
2924 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2925 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2926#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2927 tb_invalidate_phys_page_fast(ram_addr, 2);
2928 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2929#endif
3a7d929e 2930 }
5579c7f3 2931 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2932 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2933 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2934 /* we remove the notdirty callback only if the code has been
2935 flushed */
2936 if (dirty_flags == 0xff)
2e70f6ef 2937 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2938}
2939
c227f099 2940static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2941 uint32_t val)
9fa3e853 2942{
3a7d929e 2943 int dirty_flags;
3a7d929e
FB
2944 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2945 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2946#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2947 tb_invalidate_phys_page_fast(ram_addr, 4);
2948 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2949#endif
3a7d929e 2950 }
5579c7f3 2951 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2952 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2953 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2954 /* we remove the notdirty callback only if the code has been
2955 flushed */
2956 if (dirty_flags == 0xff)
2e70f6ef 2957 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2958}
2959
d60efc6b 2960static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
2961 NULL, /* never used */
2962 NULL, /* never used */
2963 NULL, /* never used */
2964};
2965
d60efc6b 2966static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
2967 notdirty_mem_writeb,
2968 notdirty_mem_writew,
2969 notdirty_mem_writel,
2970};
2971
0f459d16 2972/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2973static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2974{
2975 CPUState *env = cpu_single_env;
06d55cc1
AL
2976 target_ulong pc, cs_base;
2977 TranslationBlock *tb;
0f459d16 2978 target_ulong vaddr;
a1d1bb31 2979 CPUWatchpoint *wp;
06d55cc1 2980 int cpu_flags;
0f459d16 2981
06d55cc1
AL
2982 if (env->watchpoint_hit) {
2983 /* We re-entered the check after replacing the TB. Now raise
 2984 * the debug interrupt so that it will trigger after the
2985 * current instruction. */
2986 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2987 return;
2988 }
2e70f6ef 2989 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2990 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2991 if ((vaddr == (wp->vaddr & len_mask) ||
2992 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2993 wp->flags |= BP_WATCHPOINT_HIT;
2994 if (!env->watchpoint_hit) {
2995 env->watchpoint_hit = wp;
2996 tb = tb_find_pc(env->mem_io_pc);
2997 if (!tb) {
2998 cpu_abort(env, "check_watchpoint: could not find TB for "
2999 "pc=%p", (void *)env->mem_io_pc);
3000 }
3001 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3002 tb_phys_invalidate(tb, -1);
3003 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3004 env->exception_index = EXCP_DEBUG;
3005 } else {
3006 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3007 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3008 }
3009 cpu_resume_from_signal(env, NULL);
06d55cc1 3010 }
6e140f28
AL
3011 } else {
3012 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
3013 }
3014 }
3015}
3016
6658ffb8
PB
3017/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3018 so these check for a hit then pass through to the normal out-of-line
3019 phys routines. */
c227f099 3020static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 3021{
b4051334 3022 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
3023 return ldub_phys(addr);
3024}
3025
c227f099 3026static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 3027{
b4051334 3028 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
3029 return lduw_phys(addr);
3030}
3031
c227f099 3032static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 3033{
b4051334 3034 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
3035 return ldl_phys(addr);
3036}
3037
c227f099 3038static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3039 uint32_t val)
3040{
b4051334 3041 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
3042 stb_phys(addr, val);
3043}
3044
c227f099 3045static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3046 uint32_t val)
3047{
b4051334 3048 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
3049 stw_phys(addr, val);
3050}
3051
c227f099 3052static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
3053 uint32_t val)
3054{
b4051334 3055 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
3056 stl_phys(addr, val);
3057}
3058
d60efc6b 3059static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
3060 watch_mem_readb,
3061 watch_mem_readw,
3062 watch_mem_readl,
3063};
3064
d60efc6b 3065static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
3066 watch_mem_writeb,
3067 watch_mem_writew,
3068 watch_mem_writel,
3069};
6658ffb8 3070
c227f099 3071static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
3072 unsigned int len)
3073{
db7b5426
BS
3074 uint32_t ret;
3075 unsigned int idx;
3076
8da3ff18 3077 idx = SUBPAGE_IDX(addr);
db7b5426
BS
3078#if defined(DEBUG_SUBPAGE)
3079 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3080 mmio, len, addr, idx);
3081#endif
8da3ff18
PB
3082 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
3083 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
3084
3085 return ret;
3086}
3087
c227f099 3088static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
3089 uint32_t value, unsigned int len)
3090{
db7b5426
BS
3091 unsigned int idx;
3092
8da3ff18 3093 idx = SUBPAGE_IDX(addr);
db7b5426
BS
3094#if defined(DEBUG_SUBPAGE)
3095 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
3096 mmio, len, addr, idx, value);
3097#endif
8da3ff18
PB
3098 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
3099 addr + mmio->region_offset[idx][1][len],
3100 value);
db7b5426
BS
3101}
3102
c227f099 3103static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3104{
3105#if defined(DEBUG_SUBPAGE)
3106 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3107#endif
3108
3109 return subpage_readlen(opaque, addr, 0);
3110}
3111
c227f099 3112static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3113 uint32_t value)
3114{
3115#if defined(DEBUG_SUBPAGE)
3116 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3117#endif
3118 subpage_writelen(opaque, addr, value, 0);
3119}
3120
c227f099 3121static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3122{
3123#if defined(DEBUG_SUBPAGE)
3124 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3125#endif
3126
3127 return subpage_readlen(opaque, addr, 1);
3128}
3129
c227f099 3130static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3131 uint32_t value)
3132{
3133#if defined(DEBUG_SUBPAGE)
3134 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3135#endif
3136 subpage_writelen(opaque, addr, value, 1);
3137}
3138
c227f099 3139static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3140{
3141#if defined(DEBUG_SUBPAGE)
3142 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3143#endif
3144
3145 return subpage_readlen(opaque, addr, 2);
3146}
3147
3148static void subpage_writel (void *opaque,
c227f099 3149 target_phys_addr_t addr, uint32_t value)
db7b5426
BS
3150{
3151#if defined(DEBUG_SUBPAGE)
3152 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3153#endif
3154 subpage_writelen(opaque, addr, value, 2);
3155}
3156
d60efc6b 3157static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3158 &subpage_readb,
3159 &subpage_readw,
3160 &subpage_readl,
3161};
3162
d60efc6b 3163static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3164 &subpage_writeb,
3165 &subpage_writew,
3166 &subpage_writel,
3167};
3168
c227f099
AL
3169static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3170 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3171{
3172 int idx, eidx;
4254fab8 3173 unsigned int i;
db7b5426
BS
3174
3175 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3176 return -1;
3177 idx = SUBPAGE_IDX(start);
3178 eidx = SUBPAGE_IDX(end);
3179#if defined(DEBUG_SUBPAGE)
0bf9e31a 3180 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3181 mmio, start, end, idx, eidx, memory);
3182#endif
3183 memory >>= IO_MEM_SHIFT;
3184 for (; idx <= eidx; idx++) {
4254fab8 3185 for (i = 0; i < 4; i++) {
3ee89922
BS
3186 if (io_mem_read[memory][i]) {
3187 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3188 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 3189 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
3190 }
3191 if (io_mem_write[memory][i]) {
3192 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3193 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 3194 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 3195 }
4254fab8 3196 }
db7b5426
BS
3197 }
3198
3199 return 0;
3200}
3201
c227f099
AL
3202static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3203 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426 3204{
c227f099 3205 subpage_t *mmio;
db7b5426
BS
3206 int subpage_memory;
3207
c227f099 3208 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3209
3210 mmio->base = base;
1eed09cb 3211 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3212#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3213 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3214 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3215#endif
1eec614b
AL
3216 *phys = subpage_memory | IO_MEM_SUBPAGE;
3217 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 3218 region_offset);
db7b5426
BS
3219
3220 return mmio;
3221}
3222
88715657
AL
3223static int get_free_io_mem_idx(void)
3224{
3225 int i;
3226
 3227 for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
3228 if (!io_mem_used[i]) {
3229 io_mem_used[i] = 1;
3230 return i;
3231 }
c6703b47 3232 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3233 return -1;
3234}
3235
33417e70
FB
3236/* mem_read and mem_write are arrays of functions containing the
3237 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3238 2). Functions can be omitted with a NULL function pointer.
3ee89922 3239 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3240 modified. If it is zero, a new io zone is allocated. The return
3241 value can be used with cpu_register_physical_memory(). (-1) is
 3242 returned on error. */
1eed09cb 3243static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3244 CPUReadMemoryFunc * const *mem_read,
3245 CPUWriteMemoryFunc * const *mem_write,
1eed09cb 3246 void *opaque)
33417e70 3247{
4254fab8 3248 int i, subwidth = 0;
33417e70
FB
3249
3250 if (io_index <= 0) {
88715657
AL
3251 io_index = get_free_io_mem_idx();
3252 if (io_index == -1)
3253 return io_index;
33417e70 3254 } else {
1eed09cb 3255 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3256 if (io_index >= IO_MEM_NB_ENTRIES)
3257 return -1;
3258 }
b5ff1b31 3259
33417e70 3260 for(i = 0;i < 3; i++) {
4254fab8
BS
3261 if (!mem_read[i] || !mem_write[i])
3262 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
3263 io_mem_read[io_index][i] = mem_read[i];
3264 io_mem_write[io_index][i] = mem_write[i];
3265 }
a4193c8a 3266 io_mem_opaque[io_index] = opaque;
4254fab8 3267 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 3268}
61382a50 3269
d60efc6b
BS
3270int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3271 CPUWriteMemoryFunc * const *mem_write,
1eed09cb
AK
3272 void *opaque)
3273{
3274 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3275}
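/* Usage sketch (illustrative): a device registers byte/word/dword
   handlers and maps the returned token at a hypothetical physical
   address.  The handler functions and state pointer are schematic. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};
static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

int io = cpu_register_io_memory(mydev_read, mydev_write, mydev_state);
cpu_register_physical_memory(0xfe000000, 0x1000, io);
#endif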
3276
88715657
AL
3277void cpu_unregister_io_memory(int io_table_address)
3278{
3279 int i;
3280 int io_index = io_table_address >> IO_MEM_SHIFT;
3281
 3282 for (i = 0; i < 3; i++) {
3283 io_mem_read[io_index][i] = unassigned_mem_read[i];
3284 io_mem_write[io_index][i] = unassigned_mem_write[i];
3285 }
3286 io_mem_opaque[io_index] = NULL;
3287 io_mem_used[io_index] = 0;
3288}
3289
e9179ce1
AK
3290static void io_mem_init(void)
3291{
3292 int i;
3293
3294 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3295 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3296 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
 3297 for (i = 0; i < 5; i++)
3298 io_mem_used[i] = 1;
3299
3300 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3301 watch_mem_write, NULL);
e9179ce1
AK
3302}
3303
e2eef170
PB
3304#endif /* !defined(CONFIG_USER_ONLY) */
3305
13eb76e0
FB
3306/* physical memory access (slow version, mainly for debug) */
3307#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3308int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3309 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3310{
3311 int l, flags;
3312 target_ulong page;
53a5960a 3313 void * p;
13eb76e0
FB
3314
3315 while (len > 0) {
3316 page = addr & TARGET_PAGE_MASK;
3317 l = (page + TARGET_PAGE_SIZE) - addr;
3318 if (l > len)
3319 l = len;
3320 flags = page_get_flags(page);
3321 if (!(flags & PAGE_VALID))
a68fe89c 3322 return -1;
13eb76e0
FB
3323 if (is_write) {
3324 if (!(flags & PAGE_WRITE))
a68fe89c 3325 return -1;
579a97f7 3326 /* XXX: this code should not depend on lock_user */
72fb7daa 3327 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3328 return -1;
72fb7daa
AJ
3329 memcpy(p, buf, l);
3330 unlock_user(p, addr, l);
13eb76e0
FB
3331 } else {
3332 if (!(flags & PAGE_READ))
a68fe89c 3333 return -1;
579a97f7 3334 /* XXX: this code should not depend on lock_user */
72fb7daa 3335 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3336 return -1;
72fb7daa 3337 memcpy(buf, p, l);
5b257578 3338 unlock_user(p, addr, 0);
13eb76e0
FB
3339 }
3340 len -= l;
3341 buf += l;
3342 addr += l;
3343 }
a68fe89c 3344 return 0;
13eb76e0 3345}
8df1cd07 3346
13eb76e0 3347#else
c227f099 3348void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3349 int len, int is_write)
3350{
3351 int l, io_index;
3352 uint8_t *ptr;
3353 uint32_t val;
c227f099 3354 target_phys_addr_t page;
2e12669a 3355 unsigned long pd;
92e873b9 3356 PhysPageDesc *p;
3b46e624 3357
13eb76e0
FB
3358 while (len > 0) {
3359 page = addr & TARGET_PAGE_MASK;
3360 l = (page + TARGET_PAGE_SIZE) - addr;
3361 if (l > len)
3362 l = len;
92e873b9 3363 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3364 if (!p) {
3365 pd = IO_MEM_UNASSIGNED;
3366 } else {
3367 pd = p->phys_offset;
3368 }
3b46e624 3369
13eb76e0 3370 if (is_write) {
3a7d929e 3371 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3372 target_phys_addr_t addr1 = addr;
13eb76e0 3373 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3374 if (p)
6c2934db 3375 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3376 /* XXX: could force cpu_single_env to NULL to avoid
3377 potential bugs */
6c2934db 3378 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3379 /* 32 bit write access */
c27004ec 3380 val = ldl_p(buf);
6c2934db 3381 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3382 l = 4;
6c2934db 3383 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3384 /* 16 bit write access */
c27004ec 3385 val = lduw_p(buf);
6c2934db 3386 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3387 l = 2;
3388 } else {
1c213d19 3389 /* 8 bit write access */
c27004ec 3390 val = ldub_p(buf);
6c2934db 3391 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3392 l = 1;
3393 }
3394 } else {
b448f2f3
FB
3395 unsigned long addr1;
3396 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3397 /* RAM case */
5579c7f3 3398 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3399 memcpy(ptr, buf, l);
3a7d929e
FB
3400 if (!cpu_physical_memory_is_dirty(addr1)) {
3401 /* invalidate code */
3402 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3403 /* set dirty bit */
5fafdf24 3404 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3405 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3406 }
13eb76e0
FB
3407 }
3408 } else {
5fafdf24 3409 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3410 !(pd & IO_MEM_ROMD)) {
c227f099 3411 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3412 /* I/O case */
3413 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3414 if (p)
6c2934db
AJ
3415 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3416 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3417 /* 32 bit read access */
6c2934db 3418 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3419 stl_p(buf, val);
13eb76e0 3420 l = 4;
6c2934db 3421 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3422 /* 16 bit read access */
6c2934db 3423 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3424 stw_p(buf, val);
13eb76e0
FB
3425 l = 2;
3426 } else {
1c213d19 3427 /* 8 bit read access */
6c2934db 3428 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3429 stb_p(buf, val);
13eb76e0
FB
3430 l = 1;
3431 }
3432 } else {
3433 /* RAM case */
5579c7f3 3434 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3435 (addr & ~TARGET_PAGE_MASK);
3436 memcpy(buf, ptr, l);
3437 }
3438 }
3439 len -= l;
3440 buf += l;
3441 addr += l;
3442 }
3443}
8df1cd07 3444
d0ecd2aa 3445/* used for ROM loading: can write in RAM and ROM */
c227f099 3446void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3447 const uint8_t *buf, int len)
3448{
3449 int l;
3450 uint8_t *ptr;
c227f099 3451 target_phys_addr_t page;
d0ecd2aa
FB
3452 unsigned long pd;
3453 PhysPageDesc *p;
3b46e624 3454
d0ecd2aa
FB
3455 while (len > 0) {
3456 page = addr & TARGET_PAGE_MASK;
3457 l = (page + TARGET_PAGE_SIZE) - addr;
3458 if (l > len)
3459 l = len;
3460 p = phys_page_find(page >> TARGET_PAGE_BITS);
3461 if (!p) {
3462 pd = IO_MEM_UNASSIGNED;
3463 } else {
3464 pd = p->phys_offset;
3465 }
3b46e624 3466
d0ecd2aa 3467 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3468 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3469 !(pd & IO_MEM_ROMD)) {
3470 /* do nothing */
3471 } else {
3472 unsigned long addr1;
3473 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3474 /* ROM/RAM case */
5579c7f3 3475 ptr = qemu_get_ram_ptr(addr1);
3476 memcpy(ptr, buf, l);
3477 }
3478 len -= l;
3479 buf += l;
3480 addr += l;
3481 }
3482}
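/* Illustrative usage sketch (not part of the original file): installing
   a firmware image with cpu_physical_memory_write_rom(), which, unlike
   cpu_physical_memory_rw(), also writes into pages mapped as ROM. The
   base address and names are hypothetical. */
static void example_install_firmware(const uint8_t *blob, int size)
{
    target_phys_addr_t rom_base = 0xfffc0000; /* hypothetical ROM base */

    cpu_physical_memory_write_rom(rom_base, blob, size);
}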
3483
3484typedef struct {
3485 void *buffer;
3486 target_phys_addr_t addr;
3487 target_phys_addr_t len;
3488} BounceBuffer;
3489
3490static BounceBuffer bounce;
3491
3492typedef struct MapClient {
3493 void *opaque;
3494 void (*callback)(void *opaque);
72cf2d4f 3495 QLIST_ENTRY(MapClient) link;
3496} MapClient;
3497
3498static QLIST_HEAD(map_client_list, MapClient) map_client_list
3499 = QLIST_HEAD_INITIALIZER(map_client_list);
3500
3501void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3502{
3503 MapClient *client = qemu_malloc(sizeof(*client));
3504
3505 client->opaque = opaque;
3506 client->callback = callback;
72cf2d4f 3507 QLIST_INSERT_HEAD(&map_client_list, client, link);
3508 return client;
3509}
3510
3511void cpu_unregister_map_client(void *_client)
3512{
3513 MapClient *client = (MapClient *)_client;
3514
72cf2d4f 3515 QLIST_REMOVE(client, link);
34d5e948 3516 qemu_free(client);
3517}
3518
3519static void cpu_notify_map_clients(void)
3520{
3521 MapClient *client;
3522
3523 while (!QLIST_EMPTY(&map_client_list)) {
3524 client = QLIST_FIRST(&map_client_list);
ba223c29 3525 client->callback(client->opaque);
34d5e948 3526 cpu_unregister_map_client(client);
3527 }
3528}
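/* Illustrative usage sketch (not part of the original file): a device
   whose cpu_physical_memory_map() call failed can register a callback
   that fires from cpu_notify_map_clients() once the bounce buffer is
   released. The example_* names are hypothetical. */
static void example_retry_dma(void *opaque)
{
    /* re-issue the deferred DMA request for this device */
}

static void *example_defer_dma(void *device_state)
{
    /* keep the returned handle to cancel via cpu_unregister_map_client() */
    return cpu_register_map_client(device_state, example_retry_dma);
}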
3529
3530/* Map a physical memory region into a host virtual address.
3531 * May map a subset of the requested range, given by and returned in *plen.
3532 * May return NULL if resources needed to perform the mapping are exhausted.
3533 * Use only for reads OR writes - not for read-modify-write operations.
3534 * Use cpu_register_map_client() to know when retrying the map operation is
3535 * likely to succeed.
6d16c2f8 3536 */
3537void *cpu_physical_memory_map(target_phys_addr_t addr,
3538 target_phys_addr_t *plen,
3539 int is_write)
3540{
3541 target_phys_addr_t len = *plen;
3542 target_phys_addr_t done = 0;
3543 int l;
3544 uint8_t *ret = NULL;
3545 uint8_t *ptr;
c227f099 3546 target_phys_addr_t page;
3547 unsigned long pd;
3548 PhysPageDesc *p;
3549 unsigned long addr1;
3550
3551 while (len > 0) {
3552 page = addr & TARGET_PAGE_MASK;
3553 l = (page + TARGET_PAGE_SIZE) - addr;
3554 if (l > len)
3555 l = len;
3556 p = phys_page_find(page >> TARGET_PAGE_BITS);
3557 if (!p) {
3558 pd = IO_MEM_UNASSIGNED;
3559 } else {
3560 pd = p->phys_offset;
3561 }
3562
3563 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3564 if (done || bounce.buffer) {
3565 break;
3566 }
3567 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3568 bounce.addr = addr;
3569 bounce.len = l;
3570 if (!is_write) {
3571 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3572 }
3573 ptr = bounce.buffer;
3574 } else {
3575 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3576 ptr = qemu_get_ram_ptr(addr1);
3577 }
3578 if (!done) {
3579 ret = ptr;
3580 } else if (ret + done != ptr) {
3581 break;
3582 }
3583
3584 len -= l;
3585 addr += l;
3586 done += l;
3587 }
3588 *plen = done;
3589 return ret;
3590}
3591
3592/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3593 * Will also mark the memory as dirty if is_write == 1. access_len gives
3594 * the amount of memory that was actually read or written by the caller.
3595 */
3596void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3597 int is_write, target_phys_addr_t access_len)
3598{
3599 if (buffer != bounce.buffer) {
3600 if (is_write) {
c227f099 3601 ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3602 while (access_len) {
3603 unsigned l;
3604 l = TARGET_PAGE_SIZE;
3605 if (l > access_len)
3606 l = access_len;
3607 if (!cpu_physical_memory_is_dirty(addr1)) {
3608 /* invalidate code */
3609 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3610 /* set dirty bit */
3611 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3612 (0xff & ~CODE_DIRTY_FLAG);
3613 }
3614 addr1 += l;
3615 access_len -= l;
3616 }
3617 }
3618 return;
3619 }
3620 if (is_write) {
3621 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3622 }
f8a83245 3623 qemu_vfree(bounce.buffer);
6d16c2f8 3624 bounce.buffer = NULL;
ba223c29 3625 cpu_notify_map_clients();
6d16c2f8 3626}
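/* Illustrative usage sketch (not part of the original file): the
   map/copy/unmap pattern the two functions above are designed for.
   If the mapping comes back NULL or shorter than requested, a real
   caller would bounce or defer via a map client. Names are
   hypothetical. */
static void example_dma_to_guest(target_phys_addr_t pa, const uint8_t *src,
                                 target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(pa, &plen, 1); /* is_write == 1 */

    if (!host) {
        return; /* resources exhausted: retry after cpu_register_map_client() */
    }
    memcpy(host, src, plen); /* plen may be less than len */
    /* unmapping with is_write == 1 sets dirty bits and invalidates TBs */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}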
d0ecd2aa 3627
8df1cd07 3628/* warning: addr must be aligned */
c227f099 3629uint32_t ldl_phys(target_phys_addr_t addr)
3630{
3631 int io_index;
3632 uint8_t *ptr;
3633 uint32_t val;
3634 unsigned long pd;
3635 PhysPageDesc *p;
3636
3637 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3638 if (!p) {
3639 pd = IO_MEM_UNASSIGNED;
3640 } else {
3641 pd = p->phys_offset;
3642 }
3b46e624 3643
5fafdf24 3644 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3645 !(pd & IO_MEM_ROMD)) {
3646 /* I/O case */
3647 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3648 if (p)
3649 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3650 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3651 } else {
3652 /* RAM case */
5579c7f3 3653 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3654 (addr & ~TARGET_PAGE_MASK);
3655 val = ldl_p(ptr);
3656 }
3657 return val;
3658}
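/* Illustrative usage sketch (not part of the original file): ldl_phys()
   is the aligned 32-bit shortcut over cpu_physical_memory_rw(); for an
   MMIO page it dispatches to the region's io_mem_read handler, for RAM
   it loads directly. The register address is hypothetical. */
static uint32_t example_read_device_reg(void)
{
    return ldl_phys(0xfee00020); /* hypothetical 32-bit device register */
}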
3659
84b7b8e7 3660/* warning: addr must be aligned */
c227f099 3661uint64_t ldq_phys(target_phys_addr_t addr)
3662{
3663 int io_index;
3664 uint8_t *ptr;
3665 uint64_t val;
3666 unsigned long pd;
3667 PhysPageDesc *p;
3668
3669 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3670 if (!p) {
3671 pd = IO_MEM_UNASSIGNED;
3672 } else {
3673 pd = p->phys_offset;
3674 }
3b46e624 3675
3676 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3677 !(pd & IO_MEM_ROMD)) {
3678 /* I/O case */
3679 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3680 if (p)
3681 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3682#ifdef TARGET_WORDS_BIGENDIAN
3683 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3684 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3685#else
3686 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3687 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3688#endif
3689 } else {
3690 /* RAM case */
5579c7f3 3691 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3692 (addr & ~TARGET_PAGE_MASK);
3693 val = ldq_p(ptr);
3694 }
3695 return val;
3696}
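/* Illustrative note (not part of the original file): for MMIO, the
   64-bit load above is split into two 32-bit reads whose order follows
   TARGET_WORDS_BIGENDIAN. On a little-endian target the caller-side
   equivalent would be: */
static uint64_t example_ldq_via_two_ldl(target_phys_addr_t addr)
{
    /* low word at addr, high word at addr + 4 (little-endian target) */
    return (uint64_t)ldl_phys(addr) |
           ((uint64_t)ldl_phys(addr + 4) << 32);
}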
3697
aab33094 3698/* XXX: optimize */
c227f099 3699uint32_t ldub_phys(target_phys_addr_t addr)
3700{
3701 uint8_t val;
3702 cpu_physical_memory_read(addr, &val, 1);
3703 return val;
3704}
3705
3706/* XXX: optimize */
c227f099 3707uint32_t lduw_phys(target_phys_addr_t addr)
3708{
3709 uint16_t val;
3710 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3711 return tswap16(val);
3712}
3713
3714/* warning: addr must be aligned. The ram page is not marked as dirty
3715 and the code inside is not invalidated. It is useful if the dirty
3716 bits are used to track modified PTEs */
c227f099 3717void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3718{
3719 int io_index;
3720 uint8_t *ptr;
3721 unsigned long pd;
3722 PhysPageDesc *p;
3723
3724 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3725 if (!p) {
3726 pd = IO_MEM_UNASSIGNED;
3727 } else {
3728 pd = p->phys_offset;
3729 }
3b46e624 3730
3a7d929e 3731 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3732 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3733 if (p)
3734 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3735 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3736 } else {
74576198 3737 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
5579c7f3 3738 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3739 stl_p(ptr, val);
3740
3741 if (unlikely(in_migration)) {
3742 if (!cpu_physical_memory_is_dirty(addr1)) {
3743 /* invalidate code */
3744 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3745 /* set dirty bit */
3746 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3747 (0xff & ~CODE_DIRTY_FLAG);
3748 }
3749 }
3750 }
3751}
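/* Illustrative usage sketch (not part of the original file): updating a
   guest page-table entry with stl_phys_notdirty() so that writing the
   accessed/dirty bits does not itself disturb the dirty tracking those
   bits implement. The bit value is hypothetical. */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr,
                                      uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20); /* hypothetical accessed bit */
}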
3752
c227f099 3753void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3754{
3755 int io_index;
3756 uint8_t *ptr;
3757 unsigned long pd;
3758 PhysPageDesc *p;
3759
3760 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3761 if (!p) {
3762 pd = IO_MEM_UNASSIGNED;
3763 } else {
3764 pd = p->phys_offset;
3765 }
3b46e624 3766
3767 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3768 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3769 if (p)
3770 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3771#ifdef TARGET_WORDS_BIGENDIAN
3772 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3773 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3774#else
3775 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3776 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3777#endif
3778 } else {
5579c7f3 3779 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3780 (addr & ~TARGET_PAGE_MASK);
3781 stq_p(ptr, val);
3782 }
3783}
3784
8df1cd07 3785/* warning: addr must be aligned */
c227f099 3786void stl_phys(target_phys_addr_t addr, uint32_t val)
3787{
3788 int io_index;
3789 uint8_t *ptr;
3790 unsigned long pd;
3791 PhysPageDesc *p;
3792
3793 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3794 if (!p) {
3795 pd = IO_MEM_UNASSIGNED;
3796 } else {
3797 pd = p->phys_offset;
3798 }
3b46e624 3799
3a7d929e 3800 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3801 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3802 if (p)
3803 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3804 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3805 } else {
3806 unsigned long addr1;
3807 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3808 /* RAM case */
5579c7f3 3809 ptr = qemu_get_ram_ptr(addr1);
8df1cd07 3810 stl_p(ptr, val);
3811 if (!cpu_physical_memory_is_dirty(addr1)) {
3812 /* invalidate code */
3813 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3814 /* set dirty bit */
3815 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3816 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3817 }
3818 }
3819}
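/* Illustrative contrast (not part of the original file): a plain
   stl_phys() to RAM, unlike stl_phys_notdirty() above, invalidates any
   translated code on the page and sets the dirty bits, which is what an
   ordinary device-initiated store wants. The address is hypothetical. */
static void example_post_completion_flag(target_phys_addr_t status_pa)
{
    stl_phys(status_pa, 1); /* hypothetical "request done" flag */
}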
3820
aab33094 3821/* XXX: optimize */
c227f099 3822void stb_phys(target_phys_addr_t addr, uint32_t val)
3823{
3824 uint8_t v = val;
3825 cpu_physical_memory_write(addr, &v, 1);
3826}
3827
3828/* XXX: optimize */
c227f099 3829void stw_phys(target_phys_addr_t addr, uint32_t val)
3830{
3831 uint16_t v = tswap16(val);
3832 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3833}
3834
3835/* XXX: optimize */
c227f099 3836void stq_phys(target_phys_addr_t addr, uint64_t val)
3837{
3838 val = tswap64(val);
3839 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3840}
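/* Illustrative usage sketch (not part of the original file): the
   unoptimized helpers above go through cpu_physical_memory_write()
   after a tswap to target byte order, so they work for RAM and MMIO
   alike. The layout is hypothetical. */
static void example_store_header(target_phys_addr_t base)
{
    stb_phys(base, 0x7f);                      /* single byte, no swap */
    stw_phys(base + 2, 0x1234);                /* target byte order */
    stq_phys(base + 8, 0x1122334455667788ULL); /* swapped as a whole */
}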
3841
5e2972fd 3842/* virtual memory access for debug (includes writing to ROM) */
5fafdf24 3843int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3844 uint8_t *buf, int len, int is_write)
3845{
3846 int l;
c227f099 3847 target_phys_addr_t phys_addr;
9b3c35e0 3848 target_ulong page;
3849
3850 while (len > 0) {
3851 page = addr & TARGET_PAGE_MASK;
3852 phys_addr = cpu_get_phys_page_debug(env, page);
3853 /* if no physical page mapped, return an error */
3854 if (phys_addr == -1)
3855 return -1;
3856 l = (page + TARGET_PAGE_SIZE) - addr;
3857 if (l > len)
3858 l = len;
5e2972fd 3859 phys_addr += (addr & ~TARGET_PAGE_MASK);
3860 if (is_write)
3861 cpu_physical_memory_write_rom(phys_addr, buf, l);
3862 else
5e2972fd 3863 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3864 len -= l;
3865 buf += l;
3866 addr += l;
3867 }
3868 return 0;
3869}
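/* Illustrative usage sketch (not part of the original file): a debugger
   stub would use cpu_memory_rw_debug() to plant a software breakpoint,
   since it translates through the guest MMU and can patch ROM. The
   0xcc opcode is the x86 int3; hypothetical for other targets. */
static int example_insert_sw_breakpoint(CPUState *env, target_ulong pc,
                                        uint8_t *saved_insn)
{
    uint8_t bp = 0xcc;

    if (cpu_memory_rw_debug(env, pc, saved_insn, 1, 0) < 0) {
        return -1; /* no physical page mapped at pc */
    }
    return cpu_memory_rw_debug(env, pc, &bp, 1, 1);
}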
a68fe89c 3870#endif
13eb76e0 3871
3872/* in deterministic execution mode, instructions performing device I/O
3873 must be at the end of the TB */
3874void cpu_io_recompile(CPUState *env, void *retaddr)
3875{
3876 TranslationBlock *tb;
3877 uint32_t n, cflags;
3878 target_ulong pc, cs_base;
3879 uint64_t flags;
3880
3881 tb = tb_find_pc((unsigned long)retaddr);
3882 if (!tb) {
3883 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3884 retaddr);
3885 }
3886 n = env->icount_decr.u16.low + tb->icount;
3887 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3888 /* Calculate how many instructions had been executed before the fault
bf20dc07 3889 occurred. */
3890 n = n - env->icount_decr.u16.low;
3891 /* Generate a new TB ending on the I/O insn. */
3892 n++;
3893 /* On MIPS and SH, delay slot instructions can only be restarted if
3894 they were already the first instruction in the TB. If this is not
bf20dc07 3895 the first instruction in a TB then re-execute the preceding
3896 branch. */
3897#if defined(TARGET_MIPS)
3898 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3899 env->active_tc.PC -= 4;
3900 env->icount_decr.u16.low++;
3901 env->hflags &= ~MIPS_HFLAG_BMASK;
3902 }
3903#elif defined(TARGET_SH4)
3904 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3905 && n > 1) {
3906 env->pc -= 2;
3907 env->icount_decr.u16.low++;
3908 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3909 }
3910#endif
3911 /* This should never happen. */
3912 if (n > CF_COUNT_MASK)
3913 cpu_abort(env, "TB too big during recompile");
3914
3915 cflags = n | CF_LAST_IO;
3916 pc = tb->pc;
3917 cs_base = tb->cs_base;
3918 flags = tb->flags;
3919 tb_phys_invalidate(tb, -1);
3920 /* FIXME: In theory this could raise an exception. In practice
3921 we have already translated the block once so it's probably ok. */
3922 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3923 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3924 the first in the TB) then we end up generating a whole new TB and
3925 repeating the fault, which is horribly inefficient.
3926 Better would be to execute just this insn uncached, or generate a
3927 second new TB. */
3928 cpu_resume_from_signal(env, NULL);
3929}
3930
3931#if !defined(CONFIG_USER_ONLY)
3932
3933void dump_exec_info(FILE *f,
3934 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3935{
3936 int i, target_code_size, max_target_code_size;
3937 int direct_jmp_count, direct_jmp2_count, cross_page;
3938 TranslationBlock *tb;
3b46e624 3939
3940 target_code_size = 0;
3941 max_target_code_size = 0;
3942 cross_page = 0;
3943 direct_jmp_count = 0;
3944 direct_jmp2_count = 0;
3945 for(i = 0; i < nb_tbs; i++) {
3946 tb = &tbs[i];
3947 target_code_size += tb->size;
3948 if (tb->size > max_target_code_size)
3949 max_target_code_size = tb->size;
3950 if (tb->page_addr[1] != -1)
3951 cross_page++;
3952 if (tb->tb_next_offset[0] != 0xffff) {
3953 direct_jmp_count++;
3954 if (tb->tb_next_offset[1] != 0xffff) {
3955 direct_jmp2_count++;
3956 }
3957 }
3958 }
3959 /* XXX: avoid using doubles? */
57fec1fe 3960 cpu_fprintf(f, "Translation buffer state:\n");
3961 cpu_fprintf(f, "gen code size %ld/%ld\n",
3962 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3963 cpu_fprintf(f, "TB count %d/%d\n",
3964 nb_tbs, code_gen_max_blocks);
5fafdf24 3965 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
3966 nb_tbs ? target_code_size / nb_tbs : 0,
3967 max_target_code_size);
5fafdf24 3968 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
3969 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3970 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3971 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3972 cross_page,
3973 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3974 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3975 direct_jmp_count,
3976 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3977 direct_jmp2_count,
3978 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3979 cpu_fprintf(f, "\nStatistics:\n");
3980 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3981 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3982 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3983 tcg_dump_info(f, cpu_fprintf);
3984}
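/* Illustrative usage sketch (not part of the original file): any
   fprintf-shaped callback works, so the stats can be dumped to stderr
   directly; the monitor passes its own printer the same way. */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}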
3985
3986#define MMUSUFFIX _cmmu
3987#define GETPC() NULL
3988#define env cpu_single_env
b769d8fe 3989#define SOFTMMU_CODE_ACCESS
3990
3991#define SHIFT 0
3992#include "softmmu_template.h"
3993
3994#define SHIFT 1
3995#include "softmmu_template.h"
3996
3997#define SHIFT 2
3998#include "softmmu_template.h"
3999
4000#define SHIFT 3
4001#include "softmmu_template.h"
4002
4003#undef env
4004
4005#endif