/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break. */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

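/* A worked example of the two-level lookup (a sketch, assuming a common
   configuration with TARGET_PAGE_BITS == 12, i.e. 4 KB target pages):
   L1_BITS is then 32 - 10 - 12 = 10, so a 32-bit address splits as

       index   = addr >> TARGET_PAGE_BITS;    // 20-bit page index
       l1 slot = index >> L2_BITS;            // top 10 bits -> l1_map[]
       l2 slot = index & (L2_SIZE - 1);       // low 10 bits -> PageDesc array
*/
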
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc **l1_phys_map;

static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

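/* Note: the POSIX variant rounds the range out to host page boundaries
   because mprotect() operates on whole pages; VirtualProtect() performs
   the equivalent rounding internally, so the Win32 variant can pass the
   range through unchanged. */
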
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
#if !defined(CONFIG_USER_ONLY)
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
#endif

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

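/* Note: new L2 entries are initialized to IO_MEM_UNASSIGNED above, and
   callers such as breakpoint_invalidate() treat a NULL result from
   phys_page_find() the same way, so unmapped physical pages are routed
   to the "unassigned" I/O handlers rather than to a stray host offset. */
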
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

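/* Note: code_gen_buffer_max_size stops one maximum-sized block short of
   the end of the buffer, so tb_alloc() below can hand out a TB whenever
   code_gen_ptr is under the limit without the generated code for that
   single block ever overrunning code_gen_buffer. */
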
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

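/* Note on the '& 3' arithmetic in these list walks: a TB may span two
   pages, so the per-page TB lists tag each pointer with the index of
   the page within the TB (0 or 1) in its low bits; see tb_alloc_page(),
   which stores (TranslationBlock *)((long)tb | n).  Masking with ~3
   recovers the pointer, masking with 3 the page index.  The jump lists
   below reuse the same trick, with the value 2 marking the head of the
   circular list (tb->jmp_first = tb | 2). */
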
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

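/* A summary of the bookkeeping tb_phys_invalidate() just undid: a live
   TB is reachable through (1) the tb_phys_hash table keyed by physical
   PC, (2) the first_tb list of each page it covers, (3) each CPU's
   tb_jmp_cache, and (4) direct jumps patched into other TBs.  All four
   links must be removed before the TB can be reused. */
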
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

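/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs tab[0] with
   0xf8 (bits 3-7) and tab[1] with 0x03 (bits 8-9).  The code bitmap
   built below uses one bit per byte of the page, so it occupies
   TARGET_PAGE_SIZE / 8 bytes. */
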
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

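/* Note: a TB covers at most two pages (page_addr[0] and page_addr[1]);
   phys_page2 == -1 above means the block fits entirely in one page.
   This is why the invalidation code below only ever has to consult two
   per-page TB lists for a given block. */
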
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

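/* Note: the fast path works because start is a multiple of len and
   len <= 8, so the bits of interest never straddle a byte of the
   bitmap; one shifted load covers the whole access.  If all covered
   bits are clear, no TB intersects the write and the expensive range
   invalidation is skipped. */
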
#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated. */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

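/* Note: the binary search in tb_find_pc() relies on tbs[] being sorted
   by tc_ptr, which holds because translated code is carved out of
   code_gen_buffer strictly in allocation order (see tb_gen_code()). */
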
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

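/* Note: wp->len_mask = ~(len - 1) is only a contiguous address mask
   when len is a power of two (1, 2, 4 or 8), which is exactly what the
   sanity check in cpu_watchpoint_insert() enforces; (addr & ~len_mask)
   additionally rejects watchpoints not aligned to their own length. */
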
/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls. */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

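/* Note: in icount mode, cpu_interrupt() above must not unlink the
   current TB, since that would make the executed instruction count
   nondeterministic.  Writing 0xffff into icount_decr.u16.high instead
   makes the counter appear expired, so the translated code takes its
   normal icount exit at the next check. */
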
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
                                         CPUPhysMemoryClient *client)
{
    PhysPageDesc *pd;
    int l1, l2;

    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        pd = phys_map[l1];
        if (!pd) {
            continue;
        }
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
                continue;
            }
            client->set_memory(client, pd[l2].region_offset,
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    void **phys_map = (void **)l1_phys_map;
    int l1;
    if (!l1_phys_map) {
        return;
    }
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
        if (phys_map[l1]) {
            phys_page_for_each_in_l1_map(phys_map[l1], client);
        }
    }
#else
    if (!l1_phys_map) {
        return;
    }
    phys_page_for_each_in_l1_map(l1_phys_map, client);
#endif
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

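/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; "all" selects every entry of
   cpu_log_items, and any unknown name makes the whole call return 0. */
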
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

c5be9f08
TS
1799CPUState *cpu_copy(CPUState *env)
1800{
01ba9816 1801 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1802 CPUState *next_cpu = new_env->next_cpu;
1803 int cpu_index = new_env->cpu_index;
5a38f081
AL
1804#if defined(TARGET_HAS_ICE)
1805 CPUBreakpoint *bp;
1806 CPUWatchpoint *wp;
1807#endif
1808
c5be9f08 1809 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1810
1811 /* Preserve chaining and index. */
c5be9f08
TS
1812 new_env->next_cpu = next_cpu;
1813 new_env->cpu_index = cpu_index;
5a38f081
AL
1814
1815 /* Clone all break/watchpoints.
1816 Note: Once we support ptrace with hw-debug register access, make sure
1817 BP_CPU break/watchpoints are handled correctly on clone. */
72cf2d4f
BS
1818    QTAILQ_INIT(&new_env->breakpoints);
1819    QTAILQ_INIT(&new_env->watchpoints);
5a38f081 1820#if defined(TARGET_HAS_ICE)
72cf2d4f 1821 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
5a38f081
AL
1822 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1823 }
72cf2d4f 1824 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
5a38f081
AL
1825 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1826 wp->flags, NULL);
1827 }
1828#endif
1829
c5be9f08
TS
1830 return new_env;
1831}
1832
0124311e
FB
1833#if !defined(CONFIG_USER_ONLY)
1834
5c751e99
EI
1835static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1836{
1837 unsigned int i;
1838
1839    /* Discard jump cache entries for any TB that might
1840 overlap the flushed page. */
1841 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1842 memset (&env->tb_jmp_cache[i], 0,
1843 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1844
1845 i = tb_jmp_cache_hash_page(addr);
1846 memset (&env->tb_jmp_cache[i], 0,
1847 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1848}
1849
08738984
IK
1850static CPUTLBEntry s_cputlb_empty_entry = {
1851 .addr_read = -1,
1852 .addr_write = -1,
1853 .addr_code = -1,
1854 .addend = -1,
1855};
1856
ee8b7021
FB
1857/* NOTE: if flush_global is true, also flush global entries (not
1858 implemented yet) */
1859void tlb_flush(CPUState *env, int flush_global)
33417e70 1860{
33417e70 1861 int i;
0124311e 1862
9fa3e853
FB
1863#if defined(DEBUG_TLB)
1864 printf("tlb_flush:\n");
1865#endif
0124311e
FB
1866 /* must reset current TB so that interrupts cannot modify the
1867 links while we are modifying them */
1868 env->current_tb = NULL;
1869
33417e70 1870 for(i = 0; i < CPU_TLB_SIZE; i++) {
cfde4bd9
IY
1871 int mmu_idx;
1872 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
08738984 1873 env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
cfde4bd9 1874 }
33417e70 1875 }
9fa3e853 1876
8a40a180 1877 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1878
e3db7226 1879 tlb_flush_count++;
33417e70
FB
1880}
1881
274da6b2 1882static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1883{
5fafdf24 1884 if (addr == (tlb_entry->addr_read &
84b7b8e7 1885 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1886 addr == (tlb_entry->addr_write &
84b7b8e7 1887 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1888 addr == (tlb_entry->addr_code &
84b7b8e7 1889 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
08738984 1890 *tlb_entry = s_cputlb_empty_entry;
84b7b8e7 1891 }
61382a50
FB
1892}
1893
2e12669a 1894void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1895{
8a40a180 1896 int i;
cfde4bd9 1897 int mmu_idx;
0124311e 1898
9fa3e853 1899#if defined(DEBUG_TLB)
108c49b8 1900 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1901#endif
0124311e
FB
1902 /* must reset current TB so that interrupts cannot modify the
1903 links while we are modifying them */
1904 env->current_tb = NULL;
61382a50
FB
1905
1906 addr &= TARGET_PAGE_MASK;
1907 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
1908 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1909 tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
0124311e 1910
5c751e99 1911 tlb_flush_jmp_cache(env, addr);
9fa3e853
FB
1912}
1913
9fa3e853
FB
1914/* update the TLBs so that writes to code in the virtual page 'addr'
1915 can be detected */
c227f099 1916static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1917{
5fafdf24 1918 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1919 ram_addr + TARGET_PAGE_SIZE,
1920 CODE_DIRTY_FLAG);
9fa3e853
FB
1921}
1922
9fa3e853 1923/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1924 tested for self-modifying code */
c227f099 1925static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1926 target_ulong vaddr)
9fa3e853 1927{
3a7d929e 1928 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1929}
1930
5fafdf24 1931static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1932 unsigned long start, unsigned long length)
1933{
1934 unsigned long addr;
84b7b8e7
FB
1935 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1936 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1937 if ((addr - start) < length) {
0f459d16 1938 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1939 }
1940 }
1941}
1942
5579c7f3 1943/* Note: start and end must be within the same ram block. */
c227f099 1944void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1945 int dirty_flags)
1ccde1cb
FB
1946{
1947 CPUState *env;
4f2ac237 1948 unsigned long length, start1;
0a962c02
FB
1949 int i, mask, len;
1950 uint8_t *p;
1ccde1cb
FB
1951
1952 start &= TARGET_PAGE_MASK;
1953 end = TARGET_PAGE_ALIGN(end);
1954
1955 length = end - start;
1956 if (length == 0)
1957 return;
0a962c02 1958 len = length >> TARGET_PAGE_BITS;
f23db169
FB
1959 mask = ~dirty_flags;
1960 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1961 for(i = 0; i < len; i++)
1962 p[i] &= mask;
1963
1ccde1cb
FB
1964 /* we modify the TLB cache so that the dirty bit will be set again
1965 when accessing the range */
5579c7f3
PB
1966 start1 = (unsigned long)qemu_get_ram_ptr(start);
1967    /* Check that we don't span multiple blocks - this breaks the
1968 address comparisons below. */
1969 if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1970 != (end - 1) - start) {
1971 abort();
1972 }
1973
6a00d601 1974 for(env = first_cpu; env != NULL; env = env->next_cpu) {
cfde4bd9
IY
1975 int mmu_idx;
1976 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1977 for(i = 0; i < CPU_TLB_SIZE; i++)
1978 tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1979 start1, length);
1980 }
6a00d601 1981 }
1ccde1cb
FB
1982}
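
As a usage sketch, a hypothetical framebuffer refresh loop; cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are the cpu-all.h definitions of this era, while fb_start, fb_end and redraw_page() are invented:

ram_addr_t a;
for (a = fb_start; a < fb_end; a += TARGET_PAGE_SIZE) {
    if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
        redraw_page(a);                 /* hypothetical display hook */
    }
}
/* Clear only the VGA bit so CODE/MIGRATION dirty tracking is untouched. */
cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);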
1983
74576198
AL
1984int cpu_physical_memory_set_dirty_tracking(int enable)
1985{
f6f3fbca 1986 int ret = 0;
74576198 1987 in_migration = enable;
f6f3fbca
MT
1988 ret = cpu_notify_migration_log(!!enable);
1989 return ret;
74576198
AL
1990}
1991
1992int cpu_physical_memory_get_dirty_tracking(void)
1993{
1994 return in_migration;
1995}
1996
c227f099
AL
1997int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1998 target_phys_addr_t end_addr)
2bec46dc 1999{
7b8f3b78 2000 int ret;
151f7749 2001
f6f3fbca 2002 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
151f7749 2003 return ret;
2bec46dc
AL
2004}
2005
3a7d929e
FB
2006static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2007{
c227f099 2008 ram_addr_t ram_addr;
5579c7f3 2009 void *p;
3a7d929e 2010
84b7b8e7 2011 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5579c7f3
PB
2012 p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2013 + tlb_entry->addend);
2014 ram_addr = qemu_ram_addr_from_host(p);
3a7d929e 2015 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 2016 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
2017 }
2018 }
2019}
2020
2021/* update the TLB according to the current state of the dirty bits */
2022void cpu_tlb_update_dirty(CPUState *env)
2023{
2024 int i;
cfde4bd9
IY
2025 int mmu_idx;
2026 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2027 for(i = 0; i < CPU_TLB_SIZE; i++)
2028 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2029 }
3a7d929e
FB
2030}
2031
0f459d16 2032static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 2033{
0f459d16
PB
2034 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2035 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
2036}
2037
0f459d16
PB
2038/* update the TLB corresponding to virtual page vaddr
2039 so that it is no longer dirty */
2040static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 2041{
1ccde1cb 2042 int i;
cfde4bd9 2043 int mmu_idx;
1ccde1cb 2044
0f459d16 2045 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 2046 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
cfde4bd9
IY
2047 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2048 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
9fa3e853
FB
2049}
2050
59817ccb
FB
2051/* add a new TLB entry. At most one entry for a given virtual address
2052 is permitted. Return 0 if OK or 2 if the page could not be mapped
2053 (can only happen in non SOFTMMU mode for I/O pages or pages
2054 conflicting with the host address space). */
5fafdf24 2055int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
c227f099 2056 target_phys_addr_t paddr, int prot,
6ebbf390 2057 int mmu_idx, int is_softmmu)
9fa3e853 2058{
92e873b9 2059 PhysPageDesc *p;
4f2ac237 2060 unsigned long pd;
9fa3e853 2061 unsigned int index;
4f2ac237 2062 target_ulong address;
0f459d16 2063 target_ulong code_address;
c227f099 2064 target_phys_addr_t addend;
9fa3e853 2065 int ret;
84b7b8e7 2066 CPUTLBEntry *te;
a1d1bb31 2067 CPUWatchpoint *wp;
c227f099 2068 target_phys_addr_t iotlb;
9fa3e853 2069
92e873b9 2070 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
2071 if (!p) {
2072 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
2073 } else {
2074 pd = p->phys_offset;
9fa3e853
FB
2075 }
2076#if defined(DEBUG_TLB)
6ebbf390
JM
2077 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2078 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
2079#endif
2080
2081 ret = 0;
0f459d16
PB
2082 address = vaddr;
2083 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2084 /* IO memory case (romd handled later) */
2085 address |= TLB_MMIO;
2086 }
5579c7f3 2087 addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
0f459d16
PB
2088 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2089 /* Normal RAM. */
2090 iotlb = pd & TARGET_PAGE_MASK;
2091 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2092 iotlb |= IO_MEM_NOTDIRTY;
2093 else
2094 iotlb |= IO_MEM_ROM;
2095 } else {
ccbb4d44 2096 /* IO handlers are currently passed a physical address.
0f459d16
PB
2097 It would be nice to pass an offset from the base address
2098 of that region. This would avoid having to special case RAM,
2099 and avoid full address decoding in every device.
2100 We can't use the high bits of pd for this because
2101 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2102 iotlb = (pd & ~TARGET_PAGE_MASK);
2103 if (p) {
8da3ff18
PB
2104 iotlb += p->region_offset;
2105 } else {
2106 iotlb += paddr;
2107 }
0f459d16
PB
2108 }
2109
2110 code_address = address;
2111 /* Make accesses to pages with watchpoints go via the
2112 watchpoint trap routines. */
72cf2d4f 2113 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2114 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2115 iotlb = io_mem_watch + paddr;
2116 /* TODO: The memory case can be optimized by not trapping
2117 reads of pages with a write breakpoint. */
2118 address |= TLB_MMIO;
6658ffb8 2119 }
0f459d16 2120 }
d79acba4 2121
0f459d16
PB
2122 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2123 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2124 te = &env->tlb_table[mmu_idx][index];
2125 te->addend = addend - vaddr;
2126 if (prot & PAGE_READ) {
2127 te->addr_read = address;
2128 } else {
2129 te->addr_read = -1;
2130 }
5c751e99 2131
0f459d16
PB
2132 if (prot & PAGE_EXEC) {
2133 te->addr_code = code_address;
2134 } else {
2135 te->addr_code = -1;
2136 }
2137 if (prot & PAGE_WRITE) {
2138 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2139 (pd & IO_MEM_ROMD)) {
2140 /* Write access calls the I/O callback. */
2141 te->addr_write = address | TLB_MMIO;
2142 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2143 !cpu_physical_memory_is_dirty(pd)) {
2144 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2145 } else {
0f459d16 2146 te->addr_write = address;
9fa3e853 2147 }
0f459d16
PB
2148 } else {
2149 te->addr_write = -1;
9fa3e853 2150 }
9fa3e853
FB
2151 return ret;
2152}
2153
0124311e
FB
2154#else
2155
ee8b7021 2156void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2157{
2158}
2159
2e12669a 2160void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2161{
2162}
2163
edf8e2af
MW
2164/*
2165 * Walks guest process memory "regions" one by one
2166 * and calls callback function 'fn' for each region.
2167 */
2168int walk_memory_regions(void *priv,
2169 int (*fn)(void *, unsigned long, unsigned long, unsigned long))
33417e70 2170{
9fa3e853 2171 unsigned long start, end;
edf8e2af 2172 PageDesc *p = NULL;
9fa3e853 2173 int i, j, prot, prot1;
edf8e2af 2174 int rc = 0;
33417e70 2175
edf8e2af 2176 start = end = -1;
9fa3e853 2177 prot = 0;
edf8e2af
MW
2178
2179 for (i = 0; i <= L1_SIZE; i++) {
2180 p = (i < L1_SIZE) ? l1_map[i] : NULL;
2181 for (j = 0; j < L2_SIZE; j++) {
2182 prot1 = (p == NULL) ? 0 : p[j].flags;
2183 /*
2184 * "region" is one continuous chunk of memory
2185 * that has same protection flags set.
2186 */
9fa3e853
FB
2187 if (prot1 != prot) {
2188 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2189 if (start != -1) {
edf8e2af
MW
2190 rc = (*fn)(priv, start, end, prot);
2191 /* callback can stop iteration by returning != 0 */
2192 if (rc != 0)
2193 return (rc);
9fa3e853
FB
2194 }
2195 if (prot1 != 0)
2196 start = end;
2197 else
2198 start = -1;
2199 prot = prot1;
2200 }
edf8e2af 2201 if (p == NULL)
9fa3e853
FB
2202 break;
2203 }
33417e70 2204 }
edf8e2af
MW
2205 return (rc);
2206}
2207
2208static int dump_region(void *priv, unsigned long start,
2209 unsigned long end, unsigned long prot)
2210{
2211 FILE *f = (FILE *)priv;
2212
2213 (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2214 start, end, end - start,
2215 ((prot & PAGE_READ) ? 'r' : '-'),
2216 ((prot & PAGE_WRITE) ? 'w' : '-'),
2217 ((prot & PAGE_EXEC) ? 'x' : '-'));
2218
2219 return (0);
2220}
2221
2222/* dump memory mappings */
2223void page_dump(FILE *f)
2224{
2225 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2226 "start", "end", "size", "prot");
2227 walk_memory_regions(f, dump_region);
33417e70
FB
2228}
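
dump_region() above is one client of this walk; any per-region query follows the same shape. A hypothetical callback totalling executable bytes:

static int count_exec(void *priv, unsigned long start,
                      unsigned long end, unsigned long prot)
{
    if (prot & PAGE_EXEC) {
        *(unsigned long *)priv += end - start;
    }
    return 0;    /* keep walking; returning non-zero stops the walk */
}

unsigned long exec_bytes = 0;
walk_memory_regions(&exec_bytes, count_exec);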
2229
53a5960a 2230int page_get_flags(target_ulong address)
33417e70 2231{
9fa3e853
FB
2232 PageDesc *p;
2233
2234 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2235 if (!p)
9fa3e853
FB
2236 return 0;
2237 return p->flags;
2238}
2239
2240/* modify the flags of a page and invalidate the code if
ccbb4d44 2241 necessary. The flag PAGE_WRITE_ORG is set automatically
9fa3e853 2242 depending on PAGE_WRITE */
53a5960a 2243void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2244{
2245 PageDesc *p;
53a5960a 2246 target_ulong addr;
9fa3e853 2247
c8a706fe 2248 /* mmap_lock should already be held. */
9fa3e853
FB
2249 start = start & TARGET_PAGE_MASK;
2250 end = TARGET_PAGE_ALIGN(end);
2251 if (flags & PAGE_WRITE)
2252 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2253 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2254 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2255 /* We may be called for host regions that are outside guest
2256 address space. */
2257 if (!p)
2258 return;
9fa3e853
FB
2259 /* if the write protection is set, then we invalidate the code
2260 inside */
5fafdf24 2261 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2262 (flags & PAGE_WRITE) &&
2263 p->first_tb) {
d720b93d 2264 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2265 }
2266 p->flags = flags;
2267 }
33417e70
FB
2268}
2269
3d97b40b
TS
2270int page_check_range(target_ulong start, target_ulong len, int flags)
2271{
2272 PageDesc *p;
2273 target_ulong end;
2274 target_ulong addr;
2275
55f280c9
AZ
2276 if (start + len < start)
2277 /* we've wrapped around */
2278 return -1;
2279
3d97b40b
TS
2280 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2281 start = start & TARGET_PAGE_MASK;
2282
3d97b40b
TS
2283 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2284 p = page_find(addr >> TARGET_PAGE_BITS);
2285 if( !p )
2286 return -1;
2287 if( !(p->flags & PAGE_VALID) )
2288 return -1;
2289
dae3270c 2290 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2291 return -1;
dae3270c
FB
2292 if (flags & PAGE_WRITE) {
2293 if (!(p->flags & PAGE_WRITE_ORG))
2294 return -1;
2295 /* unprotect the page if it was put read-only because it
2296 contains translated code */
2297 if (!(p->flags & PAGE_WRITE)) {
2298 if (!page_unprotect(addr, 0, NULL))
2299 return -1;
2300 }
2301 return 0;
2302 }
3d97b40b
TS
2303 }
2304 return 0;
2305}
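
A usage sketch: validating a guest buffer before emulating a syscall that reads it (guest_buf, count and the errno path are hypothetical):

/* -1 means some page is unmapped or lacks the requested access; as a
 * side effect, pages write-protected only to guard translated code
 * are unprotected when PAGE_WRITE is requested. */
if (page_check_range(guest_buf, count, PAGE_READ) != 0) {
    return -TARGET_EFAULT;    /* hypothetical errno path */
}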
2306
9fa3e853 2307/* called from signal handler: invalidate the code and unprotect the
ccbb4d44 2308 page. Return TRUE if the fault was successfully handled. */
53a5960a 2309int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2310{
2311 unsigned int page_index, prot, pindex;
2312 PageDesc *p, *p1;
53a5960a 2313 target_ulong host_start, host_end, addr;
9fa3e853 2314
c8a706fe
PB
2315 /* Technically this isn't safe inside a signal handler. However we
2316 know this only ever happens in a synchronous SEGV handler, so in
2317 practice it seems to be ok. */
2318 mmap_lock();
2319
83fb7adf 2320 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2321 page_index = host_start >> TARGET_PAGE_BITS;
2322 p1 = page_find(page_index);
c8a706fe
PB
2323 if (!p1) {
2324 mmap_unlock();
9fa3e853 2325 return 0;
c8a706fe 2326 }
83fb7adf 2327 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2328 p = p1;
2329 prot = 0;
2330 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2331 prot |= p->flags;
2332 p++;
2333 }
2334 /* if the page was really writable, then we change its
2335 protection back to writable */
2336 if (prot & PAGE_WRITE_ORG) {
2337 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2338 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2339 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2340 (prot & PAGE_BITS) | PAGE_WRITE);
2341 p1[pindex].flags |= PAGE_WRITE;
2342 /* and since the content will be modified, we must invalidate
2343 the corresponding translated code. */
d720b93d 2344 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2345#ifdef DEBUG_TB_CHECK
2346 tb_invalidate_check(address);
2347#endif
c8a706fe 2348 mmap_unlock();
9fa3e853
FB
2349 return 1;
2350 }
2351 }
c8a706fe 2352 mmap_unlock();
9fa3e853
FB
2353 return 0;
2354}
2355
6a00d601
FB
2356static inline void tlb_set_dirty(CPUState *env,
2357 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2358{
2359}
9fa3e853
FB
2360#endif /* defined(CONFIG_USER_ONLY) */
2361
e2eef170 2362#if !defined(CONFIG_USER_ONLY)
8da3ff18 2363
c04b2b78
PB
2364#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2365typedef struct subpage_t {
2366 target_phys_addr_t base;
2367 CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
2368 CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
2369 void *opaque[TARGET_PAGE_SIZE][2][4];
2370 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
2371} subpage_t;
2372
c227f099
AL
2373static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2374 ram_addr_t memory, ram_addr_t region_offset);
2375static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2376 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2377#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2378 need_subpage) \
2379 do { \
2380 if (addr > start_addr) \
2381 start_addr2 = 0; \
2382 else { \
2383 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2384 if (start_addr2 > 0) \
2385 need_subpage = 1; \
2386 } \
2387 \
49e9fba2 2388 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2389 end_addr2 = TARGET_PAGE_SIZE - 1; \
2390 else { \
2391 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2392 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2393 need_subpage = 1; \
2394 } \
2395 } while (0)
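
A worked expansion, assuming 4 KiB target pages and a hypothetical registration of orig_size = 0x80 bytes at start_addr = 0x1040:

/* For the page at addr = 0x1000 (so addr <= start_addr):
 *   start_addr2 = 0x1040 & ~TARGET_PAGE_MASK              = 0x040 -> need_subpage
 *   end_addr2   = (0x1040 + 0x80 - 1) & ~TARGET_PAGE_MASK = 0x0BF -> need_subpage
 * Only offsets 0x040..0x0BF of the page are routed through the new
 * handlers; the rest of the page keeps its previous mapping. */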
2396
8f2498f9
MT
2397/* register physical memory.
2398 For RAM, 'size' must be a multiple of the target page size.
2399 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2400 io memory page. The address used when calling the IO function is
2401 the offset from the start of the region, plus region_offset. Both
ccbb4d44 2402 start_addr and region_offset are rounded down to a page boundary
8da3ff18
PB
2403 before calculating this offset. This should not be a problem unless
2404 the low bits of start_addr and region_offset differ. */
c227f099
AL
2405void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2406 ram_addr_t size,
2407 ram_addr_t phys_offset,
2408 ram_addr_t region_offset)
33417e70 2409{
c227f099 2410 target_phys_addr_t addr, end_addr;
92e873b9 2411 PhysPageDesc *p;
9d42037b 2412 CPUState *env;
c227f099 2413 ram_addr_t orig_size = size;
db7b5426 2414 void *subpage;
33417e70 2415
f6f3fbca
MT
2416 cpu_notify_set_memory(start_addr, size, phys_offset);
2417
67c4d23c
PB
2418 if (phys_offset == IO_MEM_UNASSIGNED) {
2419 region_offset = start_addr;
2420 }
8da3ff18 2421 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2422 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
c227f099 2423 end_addr = start_addr + (target_phys_addr_t)size;
49e9fba2 2424 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2425 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2426 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
c227f099
AL
2427 ram_addr_t orig_memory = p->phys_offset;
2428 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2429 int need_subpage = 0;
2430
2431 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2432 need_subpage);
4254fab8 2433 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2434 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2435 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2436 &p->phys_offset, orig_memory,
2437 p->region_offset);
db7b5426
BS
2438 } else {
2439 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2440 >> IO_MEM_SHIFT];
2441 }
8da3ff18
PB
2442 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2443 region_offset);
2444 p->region_offset = 0;
db7b5426
BS
2445 } else {
2446 p->phys_offset = phys_offset;
2447 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2448 (phys_offset & IO_MEM_ROMD))
2449 phys_offset += TARGET_PAGE_SIZE;
2450 }
2451 } else {
2452 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2453 p->phys_offset = phys_offset;
8da3ff18 2454 p->region_offset = region_offset;
db7b5426 2455 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2456 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2457 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2458 } else {
c227f099 2459 target_phys_addr_t start_addr2, end_addr2;
db7b5426
BS
2460 int need_subpage = 0;
2461
2462 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2463 end_addr2, need_subpage);
2464
4254fab8 2465 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2466 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2467 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2468 addr & TARGET_PAGE_MASK);
db7b5426 2469 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2470 phys_offset, region_offset);
2471 p->region_offset = 0;
db7b5426
BS
2472 }
2473 }
2474 }
8da3ff18 2475 region_offset += TARGET_PAGE_SIZE;
33417e70 2476 }
3b46e624 2477
9d42037b
FB
2478 /* since each CPU stores ram addresses in its TLB cache, we must
2479 reset the modified entries */
2480 /* XXX: slow ! */
2481 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2482 tlb_flush(env, 1);
2483 }
33417e70
FB
2484}
2485
ba863458 2486/* XXX: temporary until new memory mapping API */
c227f099 2487ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2488{
2489 PhysPageDesc *p;
2490
2491 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2492 if (!p)
2493 return IO_MEM_UNASSIGNED;
2494 return p->phys_offset;
2495}
2496
c227f099 2497void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2498{
2499 if (kvm_enabled())
2500 kvm_coalesce_mmio_region(addr, size);
2501}
2502
c227f099 2503void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
f65ed4c1
AL
2504{
2505 if (kvm_enabled())
2506 kvm_uncoalesce_mmio_region(addr, size);
2507}
2508
62a2744c
SY
2509void qemu_flush_coalesced_mmio_buffer(void)
2510{
2511 if (kvm_enabled())
2512 kvm_flush_coalesced_mmio_buffer();
2513}
2514
c902760f
MT
2515#if defined(__linux__) && !defined(TARGET_S390X)
2516
2517#include <sys/vfs.h>
2518
2519#define HUGETLBFS_MAGIC 0x958458f6
2520
2521static long gethugepagesize(const char *path)
2522{
2523 struct statfs fs;
2524 int ret;
2525
2526 do {
2527 ret = statfs(path, &fs);
2528 } while (ret != 0 && errno == EINTR);
2529
2530 if (ret != 0) {
2531 perror("statfs");
2532 return 0;
2533 }
2534
2535 if (fs.f_type != HUGETLBFS_MAGIC)
2536 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2537
2538 return fs.f_bsize;
2539}
2540
2541static void *file_ram_alloc(ram_addr_t memory, const char *path)
2542{
2543 char *filename;
2544 void *area;
2545 int fd;
2546#ifdef MAP_POPULATE
2547 int flags;
2548#endif
2549 unsigned long hpagesize;
2550
2551 hpagesize = gethugepagesize(path);
2552 if (!hpagesize) {
2553 return NULL;
2554 }
2555
2556 if (memory < hpagesize) {
2557 return NULL;
2558 }
2559
2560 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2561 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2562 return NULL;
2563 }
2564
2565 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2566 return NULL;
2567 }
2568
2569 fd = mkstemp(filename);
2570 if (fd < 0) {
2571 perror("mkstemp");
2572 free(filename);
2573 return NULL;
2574 }
2575 unlink(filename);
2576 free(filename);
2577
2578 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2579
2580 /*
2581 * ftruncate is not supported by hugetlbfs in older
2582 * hosts, so don't bother bailing out on errors.
2583 * If anything goes wrong with it under other filesystems,
2584 * mmap will fail.
2585 */
2586 if (ftruncate(fd, memory))
2587 perror("ftruncate");
2588
2589#ifdef MAP_POPULATE
2590 /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2591 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2592 * to sidestep this quirk.
2593 */
2594 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2595 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2596#else
2597 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2598#endif
2599 if (area == MAP_FAILED) {
2600 perror("file_ram_alloc: can't mmap RAM pages");
2601 close(fd);
2602 return (NULL);
2603 }
2604 return area;
2605}
2606#endif
2607
c227f099 2608ram_addr_t qemu_ram_alloc(ram_addr_t size)
94a6b54f
PB
2609{
2610 RAMBlock *new_block;
2611
94a6b54f
PB
2612 size = TARGET_PAGE_ALIGN(size);
2613 new_block = qemu_malloc(sizeof(*new_block));
2614
c902760f
MT
2615 if (mem_path) {
2616#if defined (__linux__) && !defined(TARGET_S390X)
2617 new_block->host = file_ram_alloc(size, mem_path);
2618 if (!new_block->host)
2619 exit(1);
2620#else
2621 fprintf(stderr, "-mem-path option unsupported\n");
2622 exit(1);
2623#endif
2624 } else {
6b02494d 2625#if defined(TARGET_S390X) && defined(CONFIG_KVM)
c902760f
MT
2626 /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2627 new_block->host = mmap((void*)0x1000000, size,
2628 PROT_EXEC|PROT_READ|PROT_WRITE,
2629 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
6b02494d 2630#else
c902760f 2631 new_block->host = qemu_vmalloc(size);
6b02494d 2632#endif
ccb167e9 2633#ifdef MADV_MERGEABLE
c902760f 2634 madvise(new_block->host, size, MADV_MERGEABLE);
ccb167e9 2635#endif
c902760f 2636 }
94a6b54f
PB
2637 new_block->offset = last_ram_offset;
2638 new_block->length = size;
2639
2640 new_block->next = ram_blocks;
2641 ram_blocks = new_block;
2642
2643 phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2644 (last_ram_offset + size) >> TARGET_PAGE_BITS);
2645 memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2646 0xff, size >> TARGET_PAGE_BITS);
2647
2648 last_ram_offset += size;
2649
6f0437e8
JK
2650 if (kvm_enabled())
2651 kvm_setup_guest_memory(new_block->host, size);
2652
94a6b54f
PB
2653 return new_block->offset;
2654}
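
The usual pairing with the registration code earlier in this file (a sketch; cpu_register_physical_memory() is the region_offset-free wrapper of this era, and the guest address is hypothetical):

/* Allocate 1 MB of guest RAM and map it at guest-physical 0x100000. */
ram_addr_t off = qemu_ram_alloc(0x100000);
cpu_register_physical_memory(0x100000, 0x100000, off | IO_MEM_RAM);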
e9a1ab19 2655
c227f099 2656void qemu_ram_free(ram_addr_t addr)
e9a1ab19 2657{
94a6b54f 2658 /* TODO: implement this. */
e9a1ab19
FB
2659}
2660
dc828ca1 2661/* Return a host pointer to ram allocated with qemu_ram_alloc.
5579c7f3
PB
2662 With the exception of the softmmu code in this file, this should
2663 only be used for local memory (e.g. video ram) that the device owns,
2664 and knows it isn't going to access beyond the end of the block.
2665
2666 It should not be used for general purpose DMA.
2667 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2668 */
c227f099 2669void *qemu_get_ram_ptr(ram_addr_t addr)
dc828ca1 2670{
94a6b54f
PB
2671 RAMBlock *prev;
2672 RAMBlock **prevp;
2673 RAMBlock *block;
2674
94a6b54f
PB
2675 prev = NULL;
2676 prevp = &ram_blocks;
2677 block = ram_blocks;
2678 while (block && (block->offset > addr
2679 || block->offset + block->length <= addr)) {
2680 if (prev)
2681 prevp = &prev->next;
2682 prev = block;
2683 block = block->next;
2684 }
2685 if (!block) {
2686 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2687 abort();
2688 }
2689    /* Move this entry to the start of the list. */
2690 if (prev) {
2691 prev->next = block->next;
2692 block->next = *prevp;
2693 *prevp = block;
2694 }
2695 return block->host + (addr - block->offset);
dc828ca1
PB
2696}
2697
5579c7f3
PB
2698/* Some of the softmmu routines need to translate from a host pointer
2699 (typically a TLB entry) back to a ram offset. */
c227f099 2700ram_addr_t qemu_ram_addr_from_host(void *ptr)
5579c7f3 2701{
94a6b54f 2702 RAMBlock *prev;
94a6b54f
PB
2703 RAMBlock *block;
2704 uint8_t *host = ptr;
2705
94a6b54f 2706 prev = NULL;
94a6b54f
PB
2707 block = ram_blocks;
2708 while (block && (block->host > host
2709 || block->host + block->length <= host)) {
94a6b54f
PB
2710 prev = block;
2711 block = block->next;
2712 }
2713 if (!block) {
2714 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2715 abort();
2716 }
2717 return block->offset + (host - block->host);
5579c7f3
PB
2718}
2719
c227f099 2720static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2721{
67d3b957 2722#ifdef DEBUG_UNASSIGNED
ab3d1727 2723 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2724#endif
faed1c2a 2725#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2726 do_unassigned_access(addr, 0, 0, 0, 1);
2727#endif
2728 return 0;
2729}
2730
c227f099 2731static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2732{
2733#ifdef DEBUG_UNASSIGNED
2734 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2735#endif
faed1c2a 2736#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2737 do_unassigned_access(addr, 0, 0, 0, 2);
2738#endif
2739 return 0;
2740}
2741
c227f099 2742static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
e18231a3
BS
2743{
2744#ifdef DEBUG_UNASSIGNED
2745 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2746#endif
faed1c2a 2747#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2748 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2749#endif
33417e70
FB
2750 return 0;
2751}
2752
c227f099 2753static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2754{
67d3b957 2755#ifdef DEBUG_UNASSIGNED
ab3d1727 2756 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2757#endif
faed1c2a 2758#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2759 do_unassigned_access(addr, 1, 0, 0, 1);
2760#endif
2761}
2762
c227f099 2763static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2764{
2765#ifdef DEBUG_UNASSIGNED
2766 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2767#endif
faed1c2a 2768#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3
BS
2769 do_unassigned_access(addr, 1, 0, 0, 2);
2770#endif
2771}
2772
c227f099 2773static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
e18231a3
BS
2774{
2775#ifdef DEBUG_UNASSIGNED
2776 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2777#endif
faed1c2a 2778#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
e18231a3 2779 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2780#endif
33417e70
FB
2781}
2782
d60efc6b 2783static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
33417e70 2784 unassigned_mem_readb,
e18231a3
BS
2785 unassigned_mem_readw,
2786 unassigned_mem_readl,
33417e70
FB
2787};
2788
d60efc6b 2789static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
33417e70 2790 unassigned_mem_writeb,
e18231a3
BS
2791 unassigned_mem_writew,
2792 unassigned_mem_writel,
33417e70
FB
2793};
2794
c227f099 2795static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2796 uint32_t val)
9fa3e853 2797{
3a7d929e 2798 int dirty_flags;
3a7d929e
FB
2799 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2800 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2801#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2802 tb_invalidate_phys_page_fast(ram_addr, 1);
2803 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2804#endif
3a7d929e 2805 }
5579c7f3 2806 stb_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2807 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2808 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2809 /* we remove the notdirty callback only if the code has been
2810 flushed */
2811 if (dirty_flags == 0xff)
2e70f6ef 2812 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2813}
2814
c227f099 2815static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2816 uint32_t val)
9fa3e853 2817{
3a7d929e 2818 int dirty_flags;
3a7d929e
FB
2819 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2820 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2821#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2822 tb_invalidate_phys_page_fast(ram_addr, 2);
2823 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2824#endif
3a7d929e 2825 }
5579c7f3 2826 stw_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2827 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2828 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2829 /* we remove the notdirty callback only if the code has been
2830 flushed */
2831 if (dirty_flags == 0xff)
2e70f6ef 2832 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2833}
2834
c227f099 2835static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
0f459d16 2836 uint32_t val)
9fa3e853 2837{
3a7d929e 2838 int dirty_flags;
3a7d929e
FB
2839 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2840 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2841#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2842 tb_invalidate_phys_page_fast(ram_addr, 4);
2843 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2844#endif
3a7d929e 2845 }
5579c7f3 2846 stl_p(qemu_get_ram_ptr(ram_addr), val);
f23db169
FB
2847 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2848 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2849 /* we remove the notdirty callback only if the code has been
2850 flushed */
2851 if (dirty_flags == 0xff)
2e70f6ef 2852 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2853}
2854
d60efc6b 2855static CPUReadMemoryFunc * const error_mem_read[3] = {
9fa3e853
FB
2856 NULL, /* never used */
2857 NULL, /* never used */
2858 NULL, /* never used */
2859};
2860
d60efc6b 2861static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
1ccde1cb
FB
2862 notdirty_mem_writeb,
2863 notdirty_mem_writew,
2864 notdirty_mem_writel,
2865};
2866
0f459d16 2867/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2868static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2869{
2870 CPUState *env = cpu_single_env;
06d55cc1
AL
2871 target_ulong pc, cs_base;
2872 TranslationBlock *tb;
0f459d16 2873 target_ulong vaddr;
a1d1bb31 2874 CPUWatchpoint *wp;
06d55cc1 2875 int cpu_flags;
0f459d16 2876
06d55cc1
AL
2877 if (env->watchpoint_hit) {
2878 /* We re-entered the check after replacing the TB. Now raise
2879 * the debug interrupt so that it will trigger after the
2880 * current instruction. */
2881 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2882 return;
2883 }
2e70f6ef 2884 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
72cf2d4f 2885 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2886 if ((vaddr == (wp->vaddr & len_mask) ||
2887 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2888 wp->flags |= BP_WATCHPOINT_HIT;
2889 if (!env->watchpoint_hit) {
2890 env->watchpoint_hit = wp;
2891 tb = tb_find_pc(env->mem_io_pc);
2892 if (!tb) {
2893 cpu_abort(env, "check_watchpoint: could not find TB for "
2894 "pc=%p", (void *)env->mem_io_pc);
2895 }
2896 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2897 tb_phys_invalidate(tb, -1);
2898 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2899 env->exception_index = EXCP_DEBUG;
2900 } else {
2901 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2902 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2903 }
2904 cpu_resume_from_signal(env, NULL);
06d55cc1 2905 }
6e140f28
AL
2906 } else {
2907 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2908 }
2909 }
2910}
2911
6658ffb8
PB
2912/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2913 so these check for a hit then pass through to the normal out-of-line
2914 phys routines. */
c227f099 2915static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
6658ffb8 2916{
b4051334 2917 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2918 return ldub_phys(addr);
2919}
2920
c227f099 2921static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
6658ffb8 2922{
b4051334 2923 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2924 return lduw_phys(addr);
2925}
2926
c227f099 2927static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
6658ffb8 2928{
b4051334 2929 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2930 return ldl_phys(addr);
2931}
2932
c227f099 2933static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2934 uint32_t val)
2935{
b4051334 2936 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2937 stb_phys(addr, val);
2938}
2939
c227f099 2940static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2941 uint32_t val)
2942{
b4051334 2943 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2944 stw_phys(addr, val);
2945}
2946
c227f099 2947static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
6658ffb8
PB
2948 uint32_t val)
2949{
b4051334 2950 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2951 stl_phys(addr, val);
2952}
2953
d60efc6b 2954static CPUReadMemoryFunc * const watch_mem_read[3] = {
6658ffb8
PB
2955 watch_mem_readb,
2956 watch_mem_readw,
2957 watch_mem_readl,
2958};
2959
d60efc6b 2960static CPUWriteMemoryFunc * const watch_mem_write[3] = {
6658ffb8
PB
2961 watch_mem_writeb,
2962 watch_mem_writew,
2963 watch_mem_writel,
2964};
6658ffb8 2965
c227f099 2966static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
2967 unsigned int len)
2968{
db7b5426
BS
2969 uint32_t ret;
2970 unsigned int idx;
2971
8da3ff18 2972 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2973#if defined(DEBUG_SUBPAGE)
2974 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2975 mmio, len, addr, idx);
2976#endif
8da3ff18
PB
2977 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2978 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2979
2980 return ret;
2981}
2982
c227f099 2983static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
db7b5426
BS
2984 uint32_t value, unsigned int len)
2985{
db7b5426
BS
2986 unsigned int idx;
2987
8da3ff18 2988 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2989#if defined(DEBUG_SUBPAGE)
2990 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2991 mmio, len, addr, idx, value);
2992#endif
8da3ff18
PB
2993 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2994 addr + mmio->region_offset[idx][1][len],
2995 value);
db7b5426
BS
2996}
2997
c227f099 2998static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
db7b5426
BS
2999{
3000#if defined(DEBUG_SUBPAGE)
3001 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3002#endif
3003
3004 return subpage_readlen(opaque, addr, 0);
3005}
3006
c227f099 3007static void subpage_writeb (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3008 uint32_t value)
3009{
3010#if defined(DEBUG_SUBPAGE)
3011 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3012#endif
3013 subpage_writelen(opaque, addr, value, 0);
3014}
3015
c227f099 3016static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3017{
3018#if defined(DEBUG_SUBPAGE)
3019 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3020#endif
3021
3022 return subpage_readlen(opaque, addr, 1);
3023}
3024
c227f099 3025static void subpage_writew (void *opaque, target_phys_addr_t addr,
db7b5426
BS
3026 uint32_t value)
3027{
3028#if defined(DEBUG_SUBPAGE)
3029 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3030#endif
3031 subpage_writelen(opaque, addr, value, 1);
3032}
3033
c227f099 3034static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
db7b5426
BS
3035{
3036#if defined(DEBUG_SUBPAGE)
3037 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
3038#endif
3039
3040 return subpage_readlen(opaque, addr, 2);
3041}
3042
3043static void subpage_writel (void *opaque,
c227f099 3044 target_phys_addr_t addr, uint32_t value)
db7b5426
BS
3045{
3046#if defined(DEBUG_SUBPAGE)
3047 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3048#endif
3049 subpage_writelen(opaque, addr, value, 2);
3050}
3051
d60efc6b 3052static CPUReadMemoryFunc * const subpage_read[] = {
db7b5426
BS
3053 &subpage_readb,
3054 &subpage_readw,
3055 &subpage_readl,
3056};
3057
d60efc6b 3058static CPUWriteMemoryFunc * const subpage_write[] = {
db7b5426
BS
3059 &subpage_writeb,
3060 &subpage_writew,
3061 &subpage_writel,
3062};
3063
c227f099
AL
3064static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3065 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
3066{
3067 int idx, eidx;
4254fab8 3068 unsigned int i;
db7b5426
BS
3069
3070 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3071 return -1;
3072 idx = SUBPAGE_IDX(start);
3073 eidx = SUBPAGE_IDX(end);
3074#if defined(DEBUG_SUBPAGE)
0bf9e31a 3075 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
db7b5426
BS
3076 mmio, start, end, idx, eidx, memory);
3077#endif
3078 memory >>= IO_MEM_SHIFT;
3079 for (; idx <= eidx; idx++) {
4254fab8 3080 for (i = 0; i < 4; i++) {
3ee89922
BS
3081 if (io_mem_read[memory][i]) {
3082 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
3083 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 3084 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
3085 }
3086 if (io_mem_write[memory][i]) {
3087 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
3088 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 3089 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 3090 }
4254fab8 3091 }
db7b5426
BS
3092 }
3093
3094 return 0;
3095}
3096
c227f099
AL
3097static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3098 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426 3099{
c227f099 3100 subpage_t *mmio;
db7b5426
BS
3101 int subpage_memory;
3102
c227f099 3103 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
3104
3105 mmio->base = base;
1eed09cb 3106 subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
db7b5426 3107#if defined(DEBUG_SUBPAGE)
1eec614b
AL
3108 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3109 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 3110#endif
1eec614b
AL
3111 *phys = subpage_memory | IO_MEM_SUBPAGE;
3112 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 3113 region_offset);
db7b5426
BS
3114
3115 return mmio;
3116}
3117
88715657
AL
3118static int get_free_io_mem_idx(void)
3119{
3120 int i;
3121
3122 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3123 if (!io_mem_used[i]) {
3124 io_mem_used[i] = 1;
3125 return i;
3126 }
c6703b47 3127 fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
88715657
AL
3128 return -1;
3129}
3130
33417e70
FB
3131/* mem_read and mem_write are arrays of functions containing the
3132 function to access byte (index 0), word (index 1) and dword (index
0b4e6e3e 3133 2). Functions can be omitted with a NULL function pointer.
3ee89922 3134 If io_index is non-zero, the corresponding io zone is
4254fab8
BS
3135 modified. If it is zero, a new io zone is allocated. The return
3136 value can be used with cpu_register_physical_memory(). (-1) is
3137 returned on error. */
1eed09cb 3138static int cpu_register_io_memory_fixed(int io_index,
d60efc6b
BS
3139 CPUReadMemoryFunc * const *mem_read,
3140 CPUWriteMemoryFunc * const *mem_write,
1eed09cb 3141 void *opaque)
33417e70 3142{
4254fab8 3143 int i, subwidth = 0;
33417e70
FB
3144
3145 if (io_index <= 0) {
88715657
AL
3146 io_index = get_free_io_mem_idx();
3147 if (io_index == -1)
3148 return io_index;
33417e70 3149 } else {
1eed09cb 3150 io_index >>= IO_MEM_SHIFT;
33417e70
FB
3151 if (io_index >= IO_MEM_NB_ENTRIES)
3152 return -1;
3153 }
b5ff1b31 3154
33417e70 3155 for(i = 0;i < 3; i++) {
4254fab8
BS
3156 if (!mem_read[i] || !mem_write[i])
3157 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
3158 io_mem_read[io_index][i] = mem_read[i];
3159 io_mem_write[io_index][i] = mem_write[i];
3160 }
a4193c8a 3161 io_mem_opaque[io_index] = opaque;
4254fab8 3162 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 3163}
61382a50 3164
d60efc6b
BS
3165int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3166 CPUWriteMemoryFunc * const *mem_write,
1eed09cb
AK
3167 void *opaque)
3168{
3169 return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3170}
3171
88715657
AL
3172void cpu_unregister_io_memory(int io_table_address)
3173{
3174 int i;
3175 int io_index = io_table_address >> IO_MEM_SHIFT;
3176
3177 for (i=0;i < 3; i++) {
3178 io_mem_read[io_index][i] = unassigned_mem_read[i];
3179 io_mem_write[io_index][i] = unassigned_mem_write[i];
3180 }
3181 io_mem_opaque[io_index] = NULL;
3182 io_mem_used[io_index] = 0;
3183}
3184
e9179ce1
AK
3185static void io_mem_init(void)
3186{
3187 int i;
3188
3189 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3190 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3191 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3192 for (i=0; i<5; i++)
3193 io_mem_used[i] = 1;
3194
3195 io_mem_watch = cpu_register_io_memory(watch_mem_read,
3196 watch_mem_write, NULL);
e9179ce1
AK
3197}
3198
e2eef170
PB
3199#endif /* !defined(CONFIG_USER_ONLY) */
3200
13eb76e0
FB
3201/* physical memory access (slow version, mainly for debug) */
3202#if defined(CONFIG_USER_ONLY)
a68fe89c
PB
3203int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3204 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3205{
3206 int l, flags;
3207 target_ulong page;
53a5960a 3208 void * p;
13eb76e0
FB
3209
3210 while (len > 0) {
3211 page = addr & TARGET_PAGE_MASK;
3212 l = (page + TARGET_PAGE_SIZE) - addr;
3213 if (l > len)
3214 l = len;
3215 flags = page_get_flags(page);
3216 if (!(flags & PAGE_VALID))
a68fe89c 3217 return -1;
13eb76e0
FB
3218 if (is_write) {
3219 if (!(flags & PAGE_WRITE))
a68fe89c 3220 return -1;
579a97f7 3221 /* XXX: this code should not depend on lock_user */
72fb7daa 3222 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
a68fe89c 3223 return -1;
72fb7daa
AJ
3224 memcpy(p, buf, l);
3225 unlock_user(p, addr, l);
13eb76e0
FB
3226 } else {
3227 if (!(flags & PAGE_READ))
a68fe89c 3228 return -1;
579a97f7 3229 /* XXX: this code should not depend on lock_user */
72fb7daa 3230 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
a68fe89c 3231 return -1;
72fb7daa 3232 memcpy(buf, p, l);
5b257578 3233 unlock_user(p, addr, 0);
13eb76e0
FB
3234 }
3235 len -= l;
3236 buf += l;
3237 addr += l;
3238 }
a68fe89c 3239 return 0;
13eb76e0 3240}
8df1cd07 3241
13eb76e0 3242#else
c227f099 3243void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
3244 int len, int is_write)
3245{
3246 int l, io_index;
3247 uint8_t *ptr;
3248 uint32_t val;
c227f099 3249 target_phys_addr_t page;
2e12669a 3250 unsigned long pd;
92e873b9 3251 PhysPageDesc *p;
3b46e624 3252
13eb76e0
FB
3253 while (len > 0) {
3254 page = addr & TARGET_PAGE_MASK;
3255 l = (page + TARGET_PAGE_SIZE) - addr;
3256 if (l > len)
3257 l = len;
92e873b9 3258 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
3259 if (!p) {
3260 pd = IO_MEM_UNASSIGNED;
3261 } else {
3262 pd = p->phys_offset;
3263 }
3b46e624 3264
13eb76e0 3265 if (is_write) {
3a7d929e 3266 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
c227f099 3267 target_phys_addr_t addr1 = addr;
13eb76e0 3268 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3269 if (p)
6c2934db 3270 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
3271 /* XXX: could force cpu_single_env to NULL to avoid
3272 potential bugs */
6c2934db 3273 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 3274 /* 32 bit write access */
c27004ec 3275 val = ldl_p(buf);
6c2934db 3276 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 3277 l = 4;
6c2934db 3278 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 3279 /* 16 bit write access */
c27004ec 3280 val = lduw_p(buf);
6c2934db 3281 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3282 l = 2;
3283 } else {
1c213d19 3284 /* 8 bit write access */
c27004ec 3285 val = ldub_p(buf);
6c2934db 3286 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3287 l = 1;
3288 }
3289 } else {
b448f2f3
FB
3290 unsigned long addr1;
3291 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3292 /* RAM case */
5579c7f3 3293 ptr = qemu_get_ram_ptr(addr1);
13eb76e0 3294 memcpy(ptr, buf, l);
3a7d929e
FB
3295 if (!cpu_physical_memory_is_dirty(addr1)) {
3296 /* invalidate code */
3297 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3298 /* set dirty bit */
5fafdf24 3299 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3300 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3301 }
13eb76e0
FB
3302 }
3303 } else {
5fafdf24 3304 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3305 !(pd & IO_MEM_ROMD)) {
c227f099 3306 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3307 /* I/O case */
3308 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3309 if (p)
6c2934db
AJ
3310 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3311 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3312 /* 32 bit read access */
6c2934db 3313 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3314 stl_p(buf, val);
13eb76e0 3315 l = 4;
6c2934db 3316 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3317 /* 16 bit read access */
6c2934db 3318 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3319 stw_p(buf, val);
13eb76e0
FB
3320 l = 2;
3321 } else {
1c213d19 3322 /* 8 bit read access */
6c2934db 3323 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3324 stb_p(buf, val);
13eb76e0
FB
3325 l = 1;
3326 }
3327 } else {
3328 /* RAM case */
5579c7f3 3329 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3330 (addr & ~TARGET_PAGE_MASK);
3331 memcpy(buf, ptr, l);
3332 }
3333 }
3334 len -= l;
3335 buf += l;
3336 addr += l;
3337 }
3338}
8df1cd07 3339
d0ecd2aa 3340/* used for ROM loading : can write in RAM and ROM */
c227f099 3341void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3342 const uint8_t *buf, int len)
3343{
3344 int l;
3345 uint8_t *ptr;
c227f099 3346 target_phys_addr_t page;
d0ecd2aa
FB
3347 unsigned long pd;
3348 PhysPageDesc *p;
3b46e624 3349
d0ecd2aa
FB
3350 while (len > 0) {
3351 page = addr & TARGET_PAGE_MASK;
3352 l = (page + TARGET_PAGE_SIZE) - addr;
3353 if (l > len)
3354 l = len;
3355 p = phys_page_find(page >> TARGET_PAGE_BITS);
3356 if (!p) {
3357 pd = IO_MEM_UNASSIGNED;
3358 } else {
3359 pd = p->phys_offset;
3360 }
3b46e624 3361
d0ecd2aa 3362 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3363 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3364 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3365 /* do nothing */
3366 } else {
3367 unsigned long addr1;
3368 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3369 /* ROM/RAM case */
5579c7f3 3370 ptr = qemu_get_ram_ptr(addr1);
d0ecd2aa
FB
3371 memcpy(ptr, buf, l);
3372 }
3373 len -= l;
3374 buf += l;
3375 addr += l;
3376 }
3377}
3378
6d16c2f8
AL
3379typedef struct {
3380 void *buffer;
c227f099
AL
3381 target_phys_addr_t addr;
3382 target_phys_addr_t len;
6d16c2f8
AL
3383} BounceBuffer;
3384
3385static BounceBuffer bounce;
3386
ba223c29
AL
3387typedef struct MapClient {
3388 void *opaque;
3389 void (*callback)(void *opaque);
72cf2d4f 3390 QLIST_ENTRY(MapClient) link;
ba223c29
AL
3391} MapClient;
3392
72cf2d4f
BS
3393static QLIST_HEAD(map_client_list, MapClient) map_client_list
3394 = QLIST_HEAD_INITIALIZER(map_client_list);
ba223c29
AL
3395
3396void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3397{
3398 MapClient *client = qemu_malloc(sizeof(*client));
3399
3400 client->opaque = opaque;
3401 client->callback = callback;
72cf2d4f 3402 QLIST_INSERT_HEAD(&map_client_list, client, link);
ba223c29
AL
3403 return client;
3404}
3405
3406void cpu_unregister_map_client(void *_client)
3407{
3408 MapClient *client = (MapClient *)_client;
3409
72cf2d4f 3410 QLIST_REMOVE(client, link);
34d5e948 3411 qemu_free(client);
ba223c29
AL
3412}
3413
3414static void cpu_notify_map_clients(void)
3415{
3416 MapClient *client;
3417
72cf2d4f
BS
3418 while (!QLIST_EMPTY(&map_client_list)) {
3419 client = QLIST_FIRST(&map_client_list);
ba223c29 3420 client->callback(client->opaque);
34d5e948 3421 cpu_unregister_map_client(client);
ba223c29
AL
3422 }
3423}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
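
/* Illustrative sketch (not part of the original file): the usual
 * zero-copy round trip through the pair above. Note that *plen may come
 * back shorter than requested, so real callers loop over the remainder;
 * dma-helpers.c in this tree is the production version of this pattern.
 * The function name and fallback policy here are hypothetical. */
static void example_dma_write(target_phys_addr_t addr,
                              const uint8_t *data,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Bounce buffer exhausted: fall back to cpu_physical_memory_rw()
           or register a map client and retry later. */
        return;
    }
    memcpy(host, data, plen);
    /* Unmapping marks the written pages dirty and invalidates any
       translated code that lived there. */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}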

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
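
/* Illustrative sketch (not part of the original file): device models
 * read guest-physical structures with the ld*_phys accessors above,
 * e.g. fetching a 32-bit descriptor word and a 16-bit flags field.
 * desc_addr, the layout and the field offsets are hypothetical. */
static void example_read_descriptor(target_phys_addr_t desc_addr)
{
    uint32_t buf_addr = ldl_phys(desc_addr);      /* aligned 32-bit load */
    uint32_t flags    = lduw_phys(desc_addr + 4); /* 16-bit, byte-swapped
                                                     to host order */
    (void)buf_addr;
    (void)flags;
}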

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
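
/* Illustrative sketch (not part of the original file): the classic use
 * of stl_phys_notdirty() is an MMU helper setting the accessed bit in a
 * guest PTE. Using the _notdirty variant keeps the page-table page out
 * of the dirty bitmap, so those bits remain usable for tracking PTE
 * modifications. pte_addr and the bit mask are hypothetical. */
static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    const uint32_t PTE_ACCESSED = 0x20; /* assumed per-target layout */
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}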

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
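
/* Illustrative sketch (not part of the original file): the store-side
 * counterpart of the descriptor example above, e.g. a device writing a
 * completion status back into a guest-physical ring entry. Unlike the
 * _notdirty variants, these stores mark the page dirty and invalidate
 * translated code. status_addr and the values are made up. */
static void example_complete_request(target_phys_addr_t status_addr)
{
    stl_phys(status_addr, 1);        /* 32-bit status word */
    stb_phys(status_addr + 4, 0xff); /* one-byte owner flag */
}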

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif
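
/* Illustrative sketch (not part of the original file): this is the
 * accessor the gdbstub uses for guest virtual addresses; unlike
 * cpu_physical_memory_rw() it walks the guest page tables and can fail
 * for unmapped pages. The wrapper name below is hypothetical. */
static int example_peek_guest_word(CPUState *env, target_ulong vaddr,
                                   uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1; /* no mapping for this virtual page */
    *out = ldl_p(buf);
    return 0;
}
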
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
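
/* Illustrative sketch (not part of the original file): roughly the guard
 * shape the softmmu slow path (softmmu_template.h) uses before dispatching
 * a device read under icount; real device access must terminate its TB, so
 * the pseudo-devices at or below IO_MEM_NOTDIRTY are exempted. The helper
 * name and exact condition here are an approximation. */
static void example_icount_io_guard(CPUState *env, int io_index,
                                    void *retaddr)
{
    if (io_index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT) && !can_do_io(env)) {
        /* Retranslate so the I/O instruction ends its TB. */
        cpu_io_recompile(env, retaddr);
    }
}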

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
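
/* Illustrative sketch (not part of the original file): the monitor's
 * "info jit" command funnels into dump_exec_info(); plain stdio works
 * too, since fprintf is compatible with the cpu_fprintf signature. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}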

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif