/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation. */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END 0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section \
    __attribute__((__section__(".gen_code"))) \
    __attribute__((aligned (32)))
#else
#define code_gen_section \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting. */
int use_icount = 0;
/* Current instruction counter. While executing translated code this may
   include some instructions that have not yet been executed. */
int64_t qemu_icount;
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 * In the future, this is to be replaced by a multi-level table
 * to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;
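/* A page index is resolved in two steps: the high bits select an entry in
   l1_map (or l1_phys_map), the low L2_BITS bits select the descriptor inside
   the second-level array.  For physical addresses wider than 32 bits an
   extra level is inserted on top of this (see phys_page_find_alloc()). */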
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;
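/* A subpage_t describes a target page that is split into several I/O regions:
   it holds per-offset read/write handler tables so that memory regions smaller
   than TARGET_PAGE_SIZE can be dispatched to the right handlers. */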
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                  (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
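/* Return the slot of the first-level map that covers the given target page
   index, or NULL if the index lies outside the guest address space. */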
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
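/* Look up the PhysPageDesc for a physical page index in l1_phys_map,
   allocating the intermediate and leaf tables on demand when 'alloc' is
   non-zero. */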
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
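/* Allocate the host buffer that will receive the generated code.  A 'tb_size'
   of zero selects a platform-dependent default size. */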
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

9656f324
PB
505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506
507#define CPU_COMMON_SAVE_VERSION 1
508
509static void cpu_common_save(QEMUFile *f, void *opaque)
510{
511 CPUState *env = opaque;
512
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
515}
516
517static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518{
519 CPUState *env = opaque;
520
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
523
524 qemu_get_be32s(f, &env->halted);
75f482ae 525 qemu_get_be32s(f, &env->interrupt_request);
3098dba0
AJ
526 /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
527 version_id is increased. */
528 env->interrupt_request &= ~0x01;
9656f324
PB
529 tlb_flush(env, 1);
530
531 return 0;
532}
533#endif
534
6a00d601 535void cpu_exec_init(CPUState *env)
fd6ce8f6 536{
6a00d601
FB
537 CPUState **penv;
538 int cpu_index;
539
c2764719
PB
540#if defined(CONFIG_USER_ONLY)
541 cpu_list_lock();
542#endif
6a00d601
FB
543 env->next_cpu = NULL;
544 penv = &first_cpu;
545 cpu_index = 0;
546 while (*penv != NULL) {
547 penv = (CPUState **)&(*penv)->next_cpu;
548 cpu_index++;
549 }
550 env->cpu_index = cpu_index;
c0ce998e
AL
551 TAILQ_INIT(&env->breakpoints);
552 TAILQ_INIT(&env->watchpoints);
6a00d601 553 *penv = env;
c2764719
PB
554#if defined(CONFIG_USER_ONLY)
555 cpu_list_unlock();
556#endif
b3c7724c 557#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
558 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
559 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
560 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
561 cpu_save, cpu_load, env);
562#endif
fd6ce8f6
FB
563}
564
9fa3e853
FB
565static inline void invalidate_page_bitmap(PageDesc *p)
566{
567 if (p->code_bitmap) {
59817ccb 568 qemu_free(p->code_bitmap);
9fa3e853
FB
569 p->code_bitmap = NULL;
570 }
571 p->code_write_count = 0;
572}
573
fd6ce8f6
FB
574/* set to NULL all the 'first_tb' fields in all PageDescs */
575static void page_flush_tb(void)
576{
577 int i, j;
578 PageDesc *p;
579
580 for(i = 0; i < L1_SIZE; i++) {
581 p = l1_map[i];
582 if (p) {
9fa3e853
FB
583 for(j = 0; j < L2_SIZE; j++) {
584 p->first_tb = NULL;
585 invalidate_page_bitmap(p);
586 p++;
587 }
fd6ce8f6
FB
588 }
589 }
590}
591
592/* flush all the translation blocks */
d4e8164f 593/* XXX: tb_flush is currently not thread safe */
6a00d601 594void tb_flush(CPUState *env1)
fd6ce8f6 595{
6a00d601 596 CPUState *env;
0124311e 597#if defined(DEBUG_FLUSH)
ab3d1727
BS
598 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
599 (unsigned long)(code_gen_ptr - code_gen_buffer),
600 nb_tbs, nb_tbs > 0 ?
601 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 602#endif
26a5f13b 603 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
604 cpu_abort(env1, "Internal error: code buffer overflow\n");
605
fd6ce8f6 606 nb_tbs = 0;
3b46e624 607
6a00d601
FB
608 for(env = first_cpu; env != NULL; env = env->next_cpu) {
609 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
610 }
9fa3e853 611
8a8a608f 612 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 613 page_flush_tb();
9fa3e853 614
fd6ce8f6 615 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
616 /* XXX: flush processor icache at this point if cache flush is
617 expensive */
e3db7226 618 tb_flush_count++;
fd6ce8f6
FB
619}
620
621#ifdef DEBUG_TB_CHECK
622
bc98a7ef 623static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
624{
625 TranslationBlock *tb;
626 int i;
627 address &= TARGET_PAGE_MASK;
99773bd4
PB
628 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
629 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
630 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
631 address >= tb->pc + tb->size)) {
632 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 633 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
634 }
635 }
636 }
637}
638
639/* verify that all the pages have correct rights for code */
640static void tb_page_check(void)
641{
642 TranslationBlock *tb;
643 int i, flags1, flags2;
3b46e624 644
99773bd4
PB
645 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
646 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
647 flags1 = page_get_flags(tb->pc);
648 flags2 = page_get_flags(tb->pc + tb->size - 1);
649 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
650 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 651 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
652 }
653 }
654 }
655}
656
bdaf78e0 657static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
658{
659 TranslationBlock *tb1;
660 unsigned int n1;
661
662 /* suppress any remaining jumps to this TB */
663 tb1 = tb->jmp_first;
664 for(;;) {
665 n1 = (long)tb1 & 3;
666 tb1 = (TranslationBlock *)((long)tb1 & ~3);
667 if (n1 == 2)
668 break;
669 tb1 = tb1->jmp_next[n1];
670 }
671 /* check end of list */
672 if (tb1 != tb) {
673 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
674 }
675}
676
fd6ce8f6
FB
677#endif
678
679/* invalidate one TB */
680static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
681 int next_offset)
682{
683 TranslationBlock *tb1;
684 for(;;) {
685 tb1 = *ptb;
686 if (tb1 == tb) {
687 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
688 break;
689 }
690 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
691 }
692}
693
9fa3e853
FB
694static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
695{
696 TranslationBlock *tb1;
697 unsigned int n1;
698
699 for(;;) {
700 tb1 = *ptb;
701 n1 = (long)tb1 & 3;
702 tb1 = (TranslationBlock *)((long)tb1 & ~3);
703 if (tb1 == tb) {
704 *ptb = tb1->page_next[n1];
705 break;
706 }
707 ptb = &tb1->page_next[n1];
708 }
709}
710
d4e8164f
FB
711static inline void tb_jmp_remove(TranslationBlock *tb, int n)
712{
713 TranslationBlock *tb1, **ptb;
714 unsigned int n1;
715
716 ptb = &tb->jmp_next[n];
717 tb1 = *ptb;
718 if (tb1) {
719 /* find tb(n) in circular list */
720 for(;;) {
721 tb1 = *ptb;
722 n1 = (long)tb1 & 3;
723 tb1 = (TranslationBlock *)((long)tb1 & ~3);
724 if (n1 == n && tb1 == tb)
725 break;
726 if (n1 == 2) {
727 ptb = &tb1->jmp_first;
728 } else {
729 ptb = &tb1->jmp_next[n1];
730 }
731 }
732 /* now we can suppress tb(n) from the list */
733 *ptb = tb->jmp_next[n];
734
735 tb->jmp_next[n] = NULL;
736 }
737}
738
739/* reset the jump entry 'n' of a TB so that it is not chained to
740 another TB */
741static inline void tb_reset_jump(TranslationBlock *tb, int n)
742{
743 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
744}
745
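/* Remove a TB from the physical hash table, from the per-page TB lists and
   from the jump chains of any TBs that point to it, and clear it from the
   per-CPU tb_jmp_cache. */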
2e70f6ef 746void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 747{
6a00d601 748 CPUState *env;
8a40a180 749 PageDesc *p;
d4e8164f 750 unsigned int h, n1;
00f82b8a 751 target_phys_addr_t phys_pc;
8a40a180 752 TranslationBlock *tb1, *tb2;
3b46e624 753
8a40a180
FB
754 /* remove the TB from the hash list */
755 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
756 h = tb_phys_hash_func(phys_pc);
5fafdf24 757 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
758 offsetof(TranslationBlock, phys_hash_next));
759
760 /* remove the TB from the page list */
761 if (tb->page_addr[0] != page_addr) {
762 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
763 tb_page_remove(&p->first_tb, tb);
764 invalidate_page_bitmap(p);
765 }
766 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
767 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
768 tb_page_remove(&p->first_tb, tb);
769 invalidate_page_bitmap(p);
770 }
771
36bdbe54 772 tb_invalidated_flag = 1;
59817ccb 773
fd6ce8f6 774 /* remove the TB from the hash list */
8a40a180 775 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
776 for(env = first_cpu; env != NULL; env = env->next_cpu) {
777 if (env->tb_jmp_cache[h] == tb)
778 env->tb_jmp_cache[h] = NULL;
779 }
d4e8164f
FB
780
781 /* suppress this TB from the two jump lists */
782 tb_jmp_remove(tb, 0);
783 tb_jmp_remove(tb, 1);
784
785 /* suppress any remaining jumps to this TB */
786 tb1 = tb->jmp_first;
787 for(;;) {
788 n1 = (long)tb1 & 3;
789 if (n1 == 2)
790 break;
791 tb1 = (TranslationBlock *)((long)tb1 & ~3);
792 tb2 = tb1->jmp_next[n1];
793 tb_reset_jump(tb1, n1);
794 tb1->jmp_next[n1] = NULL;
795 tb1 = tb2;
796 }
797 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 798
e3db7226 799 tb_phys_invalidate_count++;
9fa3e853
FB
800}
801
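/* Set 'len' bits starting at bit index 'start' in the bitmap 'tab'. */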
802static inline void set_bits(uint8_t *tab, int start, int len)
803{
804 int end, mask, end1;
805
806 end = start + len;
807 tab += start >> 3;
808 mask = 0xff << (start & 7);
809 if ((start & ~7) == (end & ~7)) {
810 if (start < end) {
811 mask &= ~(0xff << (end & 7));
812 *tab |= mask;
813 }
814 } else {
815 *tab++ |= mask;
816 start = (start + 8) & ~7;
817 end1 = end & ~7;
818 while (start < end1) {
819 *tab++ = 0xff;
820 start += 8;
821 }
822 if (start < end) {
823 mask = ~(0xff << (end & 7));
824 *tab |= mask;
825 }
826 }
827}
828
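/* Build the code bitmap of a page: one bit per byte of the page that is
   covered by a TB, used to filter out writes that cannot touch translated
   code. */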
829static void build_page_bitmap(PageDesc *p)
830{
831 int n, tb_start, tb_end;
832 TranslationBlock *tb;
3b46e624 833
b2a7081a 834 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
835
836 tb = p->first_tb;
837 while (tb != NULL) {
838 n = (long)tb & 3;
839 tb = (TranslationBlock *)((long)tb & ~3);
840 /* NOTE: this is subtle as a TB may span two physical pages */
841 if (n == 0) {
842 /* NOTE: tb_end may be after the end of the page, but
843 it is not a problem */
844 tb_start = tb->pc & ~TARGET_PAGE_MASK;
845 tb_end = tb_start + tb->size;
846 if (tb_end > TARGET_PAGE_SIZE)
847 tb_end = TARGET_PAGE_SIZE;
848 } else {
849 tb_start = 0;
850 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
851 }
852 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
853 tb = tb->page_next[n];
854 }
855}
856
2e70f6ef
PB
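/* Translate a new TB for (pc, cs_base, flags); if no TB slot is left, the
   whole translation cache is flushed first.  The new TB is linked into the
   physical page tables before it is returned. */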
857TranslationBlock *tb_gen_code(CPUState *env,
858 target_ulong pc, target_ulong cs_base,
859 int flags, int cflags)
d720b93d
FB
860{
861 TranslationBlock *tb;
862 uint8_t *tc_ptr;
863 target_ulong phys_pc, phys_page2, virt_page2;
864 int code_gen_size;
865
c27004ec
FB
866 phys_pc = get_phys_addr_code(env, pc);
867 tb = tb_alloc(pc);
d720b93d
FB
868 if (!tb) {
869 /* flush must be done */
870 tb_flush(env);
871 /* cannot fail at this point */
c27004ec 872 tb = tb_alloc(pc);
2e70f6ef
PB
873 /* Don't forget to invalidate previous TB info. */
874 tb_invalidated_flag = 1;
d720b93d
FB
875 }
876 tc_ptr = code_gen_ptr;
877 tb->tc_ptr = tc_ptr;
878 tb->cs_base = cs_base;
879 tb->flags = flags;
880 tb->cflags = cflags;
d07bde88 881 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 882 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 883
d720b93d 884 /* check next page if needed */
c27004ec 885 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 886 phys_page2 = -1;
c27004ec 887 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
888 phys_page2 = get_phys_addr_code(env, virt_page2);
889 }
890 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 891 return tb;
d720b93d 892}
3b46e624 893
9fa3e853
FB
894/* invalidate all TBs which intersect with the target physical page
895 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
896 the same physical page. 'is_cpu_write_access' should be true if called
897 from a real cpu write access: the virtual CPU will exit the current
898 TB if code is modified inside this TB. */
00f82b8a 899void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
900 int is_cpu_write_access)
901{
6b917547 902 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 903 CPUState *env = cpu_single_env;
9fa3e853 904 target_ulong tb_start, tb_end;
6b917547
AL
905 PageDesc *p;
906 int n;
907#ifdef TARGET_HAS_PRECISE_SMC
908 int current_tb_not_found = is_cpu_write_access;
909 TranslationBlock *current_tb = NULL;
910 int current_tb_modified = 0;
911 target_ulong current_pc = 0;
912 target_ulong current_cs_base = 0;
913 int current_flags = 0;
914#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
915
916 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 917 if (!p)
9fa3e853 918 return;
5fafdf24 919 if (!p->code_bitmap &&
d720b93d
FB
920 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
921 is_cpu_write_access) {
9fa3e853
FB
922 /* build code bitmap */
923 build_page_bitmap(p);
924 }
925
926 /* we remove all the TBs in the range [start, end[ */
927 /* XXX: see if in some cases it could be faster to invalidate all the code */
928 tb = p->first_tb;
929 while (tb != NULL) {
930 n = (long)tb & 3;
931 tb = (TranslationBlock *)((long)tb & ~3);
932 tb_next = tb->page_next[n];
933 /* NOTE: this is subtle as a TB may span two physical pages */
934 if (n == 0) {
935 /* NOTE: tb_end may be after the end of the page, but
936 it is not a problem */
937 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
938 tb_end = tb_start + tb->size;
939 } else {
940 tb_start = tb->page_addr[1];
941 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
942 }
943 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
944#ifdef TARGET_HAS_PRECISE_SMC
945 if (current_tb_not_found) {
946 current_tb_not_found = 0;
947 current_tb = NULL;
2e70f6ef 948 if (env->mem_io_pc) {
d720b93d 949 /* now we have a real cpu fault */
2e70f6ef 950 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
951 }
952 }
953 if (current_tb == tb &&
2e70f6ef 954 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
955 /* If we are modifying the current TB, we must stop
956 its execution. We could be more precise by checking
957 that the modification is after the current PC, but it
958 would require a specialized function to partially
959 restore the CPU state */
3b46e624 960
d720b93d 961 current_tb_modified = 1;
5fafdf24 962 cpu_restore_state(current_tb, env,
2e70f6ef 963 env->mem_io_pc, NULL);
6b917547
AL
964 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
965 &current_flags);
d720b93d
FB
966 }
967#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
968 /* we need to do that to handle the case where a signal
969 occurs while doing tb_phys_invalidate() */
970 saved_tb = NULL;
971 if (env) {
972 saved_tb = env->current_tb;
973 env->current_tb = NULL;
974 }
9fa3e853 975 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
976 if (env) {
977 env->current_tb = saved_tb;
978 if (env->interrupt_request && env->current_tb)
979 cpu_interrupt(env, env->interrupt_request);
980 }
9fa3e853
FB
981 }
982 tb = tb_next;
983 }
984#if !defined(CONFIG_USER_ONLY)
985 /* if no code remaining, no need to continue to use slow writes */
986 if (!p->first_tb) {
987 invalidate_page_bitmap(p);
d720b93d 988 if (is_cpu_write_access) {
2e70f6ef 989 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
990 }
991 }
992#endif
993#ifdef TARGET_HAS_PRECISE_SMC
994 if (current_tb_modified) {
995 /* we generate a block containing just the instruction
996 modifying the memory. It will ensure that it cannot modify
997 itself */
ea1c1802 998 env->current_tb = NULL;
2e70f6ef 999 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 1000 cpu_resume_from_signal(env, NULL);
9fa3e853 1001 }
fd6ce8f6 1002#endif
9fa3e853 1003}
fd6ce8f6 1004
9fa3e853 1005/* len must be <= 8 and start must be a multiple of len */
00f82b8a 1006static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
1007{
1008 PageDesc *p;
1009 int offset, b;
59817ccb 1010#if 0
a4193c8a 1011 if (1) {
93fcfe39
AL
1012 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1013 cpu_single_env->mem_io_vaddr, len,
1014 cpu_single_env->eip,
1015 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1016 }
1017#endif
9fa3e853 1018 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1019 if (!p)
9fa3e853
FB
1020 return;
1021 if (p->code_bitmap) {
1022 offset = start & ~TARGET_PAGE_MASK;
1023 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1024 if (b & ((1 << len) - 1))
1025 goto do_invalidate;
1026 } else {
1027 do_invalidate:
d720b93d 1028 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1029 }
1030}
1031
9fa3e853 1032#if !defined(CONFIG_SOFTMMU)
00f82b8a 1033static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1034 unsigned long pc, void *puc)
9fa3e853 1035{
6b917547 1036 TranslationBlock *tb;
9fa3e853 1037 PageDesc *p;
6b917547 1038 int n;
d720b93d 1039#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1040 TranslationBlock *current_tb = NULL;
d720b93d 1041 CPUState *env = cpu_single_env;
6b917547
AL
1042 int current_tb_modified = 0;
1043 target_ulong current_pc = 0;
1044 target_ulong current_cs_base = 0;
1045 int current_flags = 0;
d720b93d 1046#endif
9fa3e853
FB
1047
1048 addr &= TARGET_PAGE_MASK;
1049 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1050 if (!p)
9fa3e853
FB
1051 return;
1052 tb = p->first_tb;
d720b93d
FB
1053#ifdef TARGET_HAS_PRECISE_SMC
1054 if (tb && pc != 0) {
1055 current_tb = tb_find_pc(pc);
1056 }
1057#endif
9fa3e853
FB
1058 while (tb != NULL) {
1059 n = (long)tb & 3;
1060 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1061#ifdef TARGET_HAS_PRECISE_SMC
1062 if (current_tb == tb &&
2e70f6ef 1063 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1064 /* If we are modifying the current TB, we must stop
1065 its execution. We could be more precise by checking
1066 that the modification is after the current PC, but it
1067 would require a specialized function to partially
1068 restore the CPU state */
3b46e624 1069
d720b93d
FB
1070 current_tb_modified = 1;
1071 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1072 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1073 &current_flags);
d720b93d
FB
1074 }
1075#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1076 tb_phys_invalidate(tb, addr);
1077 tb = tb->page_next[n];
1078 }
fd6ce8f6 1079 p->first_tb = NULL;
d720b93d
FB
1080#ifdef TARGET_HAS_PRECISE_SMC
1081 if (current_tb_modified) {
1082 /* we generate a block containing just the instruction
1083 modifying the memory. It will ensure that it cannot modify
1084 itself */
ea1c1802 1085 env->current_tb = NULL;
2e70f6ef 1086 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1087 cpu_resume_from_signal(env, puc);
1088 }
1089#endif
fd6ce8f6 1090}
9fa3e853 1091#endif
fd6ce8f6
FB
1092
1093/* add the tb in the target page and protect it if necessary */
5fafdf24 1094static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1095 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1096{
1097 PageDesc *p;
9fa3e853
FB
1098 TranslationBlock *last_first_tb;
1099
1100 tb->page_addr[n] = page_addr;
3a7d929e 1101 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1102 tb->page_next[n] = p->first_tb;
1103 last_first_tb = p->first_tb;
1104 p->first_tb = (TranslationBlock *)((long)tb | n);
1105 invalidate_page_bitmap(p);
fd6ce8f6 1106
107db443 1107#if defined(TARGET_HAS_SMC) || 1
d720b93d 1108
9fa3e853 1109#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1110 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1111 target_ulong addr;
1112 PageDesc *p2;
9fa3e853
FB
1113 int prot;
1114
fd6ce8f6
FB
1115 /* force the host page as non writable (writes will have a
1116 page fault + mprotect overhead) */
53a5960a 1117 page_addr &= qemu_host_page_mask;
fd6ce8f6 1118 prot = 0;
53a5960a
PB
1119 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1120 addr += TARGET_PAGE_SIZE) {
1121
1122 p2 = page_find (addr >> TARGET_PAGE_BITS);
1123 if (!p2)
1124 continue;
1125 prot |= p2->flags;
1126 p2->flags &= ~PAGE_WRITE;
1127 page_get_flags(addr);
1128 }
5fafdf24 1129 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1130 (prot & PAGE_BITS) & ~PAGE_WRITE);
1131#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1132 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1133 page_addr);
fd6ce8f6 1134#endif
fd6ce8f6 1135 }
9fa3e853
FB
1136#else
1137 /* if some code is already present, then the pages are already
1138 protected. So we handle the case where only the first TB is
1139 allocated in a physical page */
1140 if (!last_first_tb) {
6a00d601 1141 tlb_protect_code(page_addr);
9fa3e853
FB
1142 }
1143#endif
d720b93d
FB
1144
1145#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1146}
1147
1148/* Allocate a new translation block. Flush the translation buffer if
1149 too many translation blocks or too much generated code. */
c27004ec 1150TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1151{
1152 TranslationBlock *tb;
fd6ce8f6 1153
26a5f13b
FB
1154 if (nb_tbs >= code_gen_max_blocks ||
1155 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1156 return NULL;
fd6ce8f6
FB
1157 tb = &tbs[nb_tbs++];
1158 tb->pc = pc;
b448f2f3 1159 tb->cflags = 0;
d4e8164f
FB
1160 return tb;
1161}
1162
2e70f6ef
PB
1163void tb_free(TranslationBlock *tb)
1164{
bf20dc07 1165 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1166 Ignore the hard cases and just back up if this TB happens to
1167 be the last one generated. */
1168 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1169 code_gen_ptr = tb->tc_ptr;
1170 nb_tbs--;
1171 }
1172}
1173
9fa3e853
FB
1174/* add a new TB and link it to the physical page tables. phys_page2 is
1175 (-1) to indicate that only one page contains the TB. */
5fafdf24 1176void tb_link_phys(TranslationBlock *tb,
9fa3e853 1177 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1178{
9fa3e853
FB
1179 unsigned int h;
1180 TranslationBlock **ptb;
1181
c8a706fe
PB
1182 /* Grab the mmap lock to stop another thread invalidating this TB
1183 before we are done. */
1184 mmap_lock();
9fa3e853
FB
1185 /* add in the physical hash table */
1186 h = tb_phys_hash_func(phys_pc);
1187 ptb = &tb_phys_hash[h];
1188 tb->phys_hash_next = *ptb;
1189 *ptb = tb;
fd6ce8f6
FB
1190
1191 /* add in the page list */
9fa3e853
FB
1192 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1193 if (phys_page2 != -1)
1194 tb_alloc_page(tb, 1, phys_page2);
1195 else
1196 tb->page_addr[1] = -1;
9fa3e853 1197
d4e8164f
FB
1198 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1199 tb->jmp_next[0] = NULL;
1200 tb->jmp_next[1] = NULL;
1201
1202 /* init original jump addresses */
1203 if (tb->tb_next_offset[0] != 0xffff)
1204 tb_reset_jump(tb, 0);
1205 if (tb->tb_next_offset[1] != 0xffff)
1206 tb_reset_jump(tb, 1);
8a40a180
FB
1207
1208#ifdef DEBUG_TB_CHECK
1209 tb_page_check();
1210#endif
c8a706fe 1211 mmap_unlock();
fd6ce8f6
FB
1212}
1213
9fa3e853
FB
1214/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1215 tb[1].tc_ptr. Return NULL if not found */
1216TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1217{
9fa3e853
FB
1218 int m_min, m_max, m;
1219 unsigned long v;
1220 TranslationBlock *tb;
a513fe19
FB
1221
1222 if (nb_tbs <= 0)
1223 return NULL;
1224 if (tc_ptr < (unsigned long)code_gen_buffer ||
1225 tc_ptr >= (unsigned long)code_gen_ptr)
1226 return NULL;
1227 /* binary search (cf Knuth) */
1228 m_min = 0;
1229 m_max = nb_tbs - 1;
1230 while (m_min <= m_max) {
1231 m = (m_min + m_max) >> 1;
1232 tb = &tbs[m];
1233 v = (unsigned long)tb->tc_ptr;
1234 if (v == tc_ptr)
1235 return tb;
1236 else if (tc_ptr < v) {
1237 m_max = m - 1;
1238 } else {
1239 m_min = m + 1;
1240 }
5fafdf24 1241 }
a513fe19
FB
1242 return &tbs[m_max];
1243}
7501267e 1244
ea041c0e
FB
1245static void tb_reset_jump_recursive(TranslationBlock *tb);
1246
1247static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1248{
1249 TranslationBlock *tb1, *tb_next, **ptb;
1250 unsigned int n1;
1251
1252 tb1 = tb->jmp_next[n];
1253 if (tb1 != NULL) {
1254 /* find head of list */
1255 for(;;) {
1256 n1 = (long)tb1 & 3;
1257 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1258 if (n1 == 2)
1259 break;
1260 tb1 = tb1->jmp_next[n1];
1261 }
1262 /* we are now sure now that tb jumps to tb1 */
1263 tb_next = tb1;
1264
1265 /* remove tb from the jmp_first list */
1266 ptb = &tb_next->jmp_first;
1267 for(;;) {
1268 tb1 = *ptb;
1269 n1 = (long)tb1 & 3;
1270 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1271 if (n1 == n && tb1 == tb)
1272 break;
1273 ptb = &tb1->jmp_next[n1];
1274 }
1275 *ptb = tb->jmp_next[n];
1276 tb->jmp_next[n] = NULL;
3b46e624 1277
ea041c0e
FB
1278 /* suppress the jump to next tb in generated code */
1279 tb_reset_jump(tb, n);
1280
0124311e 1281 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1282 tb_reset_jump_recursive(tb_next);
1283 }
1284}
1285
1286static void tb_reset_jump_recursive(TranslationBlock *tb)
1287{
1288 tb_reset_jump_recursive2(tb, 0);
1289 tb_reset_jump_recursive2(tb, 1);
1290}
1291
1fddef4b 1292#if defined(TARGET_HAS_ICE)
d720b93d
FB
1293static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1294{
9b3c35e0
JM
1295 target_phys_addr_t addr;
1296 target_ulong pd;
c2f07f81
PB
1297 ram_addr_t ram_addr;
1298 PhysPageDesc *p;
d720b93d 1299
c2f07f81
PB
1300 addr = cpu_get_phys_page_debug(env, pc);
1301 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1302 if (!p) {
1303 pd = IO_MEM_UNASSIGNED;
1304 } else {
1305 pd = p->phys_offset;
1306 }
1307 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1308 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1309}
c27004ec 1310#endif
d720b93d 1311
6658ffb8 1312/* Add a watchpoint. */
a1d1bb31
AL
1313int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1314 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1315{
b4051334 1316 target_ulong len_mask = ~(len - 1);
c0ce998e 1317 CPUWatchpoint *wp;
6658ffb8 1318
b4051334
AL
1319 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1320 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1321 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1322 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1323 return -EINVAL;
1324 }
a1d1bb31 1325 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1326
1327 wp->vaddr = addr;
b4051334 1328 wp->len_mask = len_mask;
a1d1bb31
AL
1329 wp->flags = flags;
1330
2dc9f411 1331 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1332 if (flags & BP_GDB)
1333 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1334 else
1335 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1336
6658ffb8 1337 tlb_flush_page(env, addr);
a1d1bb31
AL
1338
1339 if (watchpoint)
1340 *watchpoint = wp;
1341 return 0;
6658ffb8
PB
1342}
1343
a1d1bb31
AL
1344/* Remove a specific watchpoint. */
1345int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1346 int flags)
6658ffb8 1347{
b4051334 1348 target_ulong len_mask = ~(len - 1);
a1d1bb31 1349 CPUWatchpoint *wp;
6658ffb8 1350
c0ce998e 1351 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1352 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1353 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1354 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1355 return 0;
1356 }
1357 }
a1d1bb31 1358 return -ENOENT;
6658ffb8
PB
1359}
1360
a1d1bb31
AL
1361/* Remove a specific watchpoint by reference. */
1362void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1363{
c0ce998e 1364 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1365
a1d1bb31
AL
1366 tlb_flush_page(env, watchpoint->vaddr);
1367
1368 qemu_free(watchpoint);
1369}
1370
1371/* Remove all matching watchpoints. */
1372void cpu_watchpoint_remove_all(CPUState *env, int mask)
1373{
c0ce998e 1374 CPUWatchpoint *wp, *next;
a1d1bb31 1375
c0ce998e 1376 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1377 if (wp->flags & mask)
1378 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1379 }
7d03f82f
EI
1380}
1381
a1d1bb31
AL
1382/* Add a breakpoint. */
1383int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1384 CPUBreakpoint **breakpoint)
4c3a88a2 1385{
1fddef4b 1386#if defined(TARGET_HAS_ICE)
c0ce998e 1387 CPUBreakpoint *bp;
3b46e624 1388
a1d1bb31 1389 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1390
a1d1bb31
AL
1391 bp->pc = pc;
1392 bp->flags = flags;
1393
2dc9f411 1394 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1395 if (flags & BP_GDB)
1396 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1397 else
1398 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1399
d720b93d 1400 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1401
1402 if (breakpoint)
1403 *breakpoint = bp;
4c3a88a2
FB
1404 return 0;
1405#else
a1d1bb31 1406 return -ENOSYS;
4c3a88a2
FB
1407#endif
1408}
1409
a1d1bb31
AL
1410/* Remove a specific breakpoint. */
1411int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1412{
7d03f82f 1413#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1414 CPUBreakpoint *bp;
1415
c0ce998e 1416 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1417 if (bp->pc == pc && bp->flags == flags) {
1418 cpu_breakpoint_remove_by_ref(env, bp);
1419 return 0;
1420 }
7d03f82f 1421 }
a1d1bb31
AL
1422 return -ENOENT;
1423#else
1424 return -ENOSYS;
7d03f82f
EI
1425#endif
1426}
1427
a1d1bb31
AL
1428/* Remove a specific breakpoint by reference. */
1429void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1430{
1fddef4b 1431#if defined(TARGET_HAS_ICE)
c0ce998e 1432 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1433
a1d1bb31
AL
1434 breakpoint_invalidate(env, breakpoint->pc);
1435
1436 qemu_free(breakpoint);
1437#endif
1438}
1439
1440/* Remove all matching breakpoints. */
1441void cpu_breakpoint_remove_all(CPUState *env, int mask)
1442{
1443#if defined(TARGET_HAS_ICE)
c0ce998e 1444 CPUBreakpoint *bp, *next;
a1d1bb31 1445
c0ce998e 1446 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1447 if (bp->flags & mask)
1448 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1449 }
4c3a88a2
FB
1450#endif
1451}
1452
c33a346e
FB
1453/* enable or disable single step mode. EXCP_DEBUG is returned by the
1454 CPU loop after each instruction */
1455void cpu_single_step(CPUState *env, int enabled)
1456{
1fddef4b 1457#if defined(TARGET_HAS_ICE)
c33a346e
FB
1458 if (env->singlestep_enabled != enabled) {
1459 env->singlestep_enabled = enabled;
e22a25c9
AL
1460 if (kvm_enabled())
1461 kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
c33a346e
FB
1467 }
1468#endif
1469}
1470
34865134
FB
1471/* enable or disable low levels log */
1472void cpu_set_log(int log_flags)
1473{
1474 loglevel = log_flags;
1475 if (loglevel && !logfile) {
11fcfab4 1476 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1477 if (!logfile) {
1478 perror(logfilename);
1479 _exit(1);
1480 }
9fa3e853
FB
1481#if !defined(CONFIG_SOFTMMU)
1482 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1483 {
b55266b5 1484 static char logfile_buf[4096];
9fa3e853
FB
1485 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1486 }
1487#else
34865134 1488 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1489#endif
e735b91c
PB
1490 log_append = 1;
1491 }
1492 if (!loglevel && logfile) {
1493 fclose(logfile);
1494 logfile = NULL;
34865134
FB
1495 }
1496}
1497
1498void cpu_set_log_filename(const char *filename)
1499{
1500 logfilename = strdup(filename);
e735b91c
PB
1501 if (logfile) {
1502 fclose(logfile);
1503 logfile = NULL;
1504 }
1505 cpu_set_log(loglevel);
34865134 1506}
c33a346e 1507
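/* Break the currently executing TB out of its chained jumps so that the CPU
   returns to the main loop and notices pending requests. */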
3098dba0 1508static void cpu_unlink_tb(CPUState *env)
ea041c0e 1509{
3098dba0
AJ
1510#if defined(USE_NPTL)
1511 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1512 problem and hope the cpu will stop of its own accord. For userspace
1513 emulation this often isn't actually as bad as it sounds. Often
1514 signals are used primarily to interrupt blocking syscalls. */
1515#else
ea041c0e 1516 TranslationBlock *tb;
15a51156 1517 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
59817ccb 1518
3098dba0
AJ
1519 tb = env->current_tb;
1520 /* if the cpu is currently executing code, we must unlink it and
1521 all the potentially executing TB */
1522 if (tb && !testandset(&interrupt_lock)) {
1523 env->current_tb = NULL;
1524 tb_reset_jump_recursive(tb);
1525 resetlock(&interrupt_lock);
be214e6c 1526 }
3098dba0
AJ
1527#endif
1528}
1529
1530/* mask must never be zero, except for A20 change call */
1531void cpu_interrupt(CPUState *env, int mask)
1532{
1533 int old_mask;
be214e6c 1534
2e70f6ef 1535 old_mask = env->interrupt_request;
68a79315 1536 env->interrupt_request |= mask;
3098dba0 1537
2e70f6ef 1538 if (use_icount) {
266910c4 1539 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1540#ifndef CONFIG_USER_ONLY
2e70f6ef 1541 if (!can_do_io(env)
be214e6c 1542 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1543 cpu_abort(env, "Raised interrupt while not in I/O function");
1544 }
1545#endif
1546 } else {
3098dba0 1547 cpu_unlink_tb(env);
ea041c0e
FB
1548 }
1549}
1550
b54ad049
FB
1551void cpu_reset_interrupt(CPUState *env, int mask)
1552{
1553 env->interrupt_request &= ~mask;
1554}
1555
3098dba0
AJ
1556void cpu_exit(CPUState *env)
1557{
1558 env->exit_request = 1;
1559 cpu_unlink_tb(env);
1560}
1561
c7cd6a37 1562const CPULogItem cpu_log_items[] = {
5fafdf24 1563 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1564 "show generated host assembly code for each compiled TB" },
1565 { CPU_LOG_TB_IN_ASM, "in_asm",
1566 "show target assembly code for each compiled TB" },
5fafdf24 1567 { CPU_LOG_TB_OP, "op",
57fec1fe 1568 "show micro ops for each compiled TB" },
f193c797 1569 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1570 "show micro ops "
1571#ifdef TARGET_I386
1572 "before eflags optimization and "
f193c797 1573#endif
e01a1157 1574 "after liveness analysis" },
f193c797
FB
1575 { CPU_LOG_INT, "int",
1576 "show interrupts/exceptions in short format" },
1577 { CPU_LOG_EXEC, "exec",
1578 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1579 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1580 "show CPU state before block translation" },
f193c797
FB
1581#ifdef TARGET_I386
1582 { CPU_LOG_PCALL, "pcall",
1583 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1584 { CPU_LOG_RESET, "cpu_reset",
1585 "show CPU state before CPU resets" },
f193c797 1586#endif
8e3a9fd2 1587#ifdef DEBUG_IOPORT
fd872598
FB
1588 { CPU_LOG_IOPORT, "ioport",
1589 "show all i/o ports accesses" },
8e3a9fd2 1590#endif
f193c797
FB
1591 { 0, NULL, NULL },
1592};
1593
1594static int cmp1(const char *s1, int n, const char *s2)
1595{
1596 if (strlen(s2) != n)
1597 return 0;
1598 return memcmp(s1, s2, n) == 0;
1599}
3b46e624 1600
f193c797
FB
1601/* takes a comma separated list of log masks. Return 0 if error. */
1602int cpu_str_to_log_mask(const char *str)
1603{
c7cd6a37 1604 const CPULogItem *item;
f193c797
FB
1605 int mask;
1606 const char *p, *p1;
1607
1608 p = str;
1609 mask = 0;
1610 for(;;) {
1611 p1 = strchr(p, ',');
1612 if (!p1)
1613 p1 = p + strlen(p);
8e3a9fd2
FB
1614 if(cmp1(p,p1-p,"all")) {
1615 for(item = cpu_log_items; item->mask != 0; item++) {
1616 mask |= item->mask;
1617 }
1618 } else {
f193c797
FB
1619 for(item = cpu_log_items; item->mask != 0; item++) {
1620 if (cmp1(p, p1 - p, item->name))
1621 goto found;
1622 }
1623 return 0;
8e3a9fd2 1624 }
f193c797
FB
1625 found:
1626 mask |= item->mask;
1627 if (*p1 != ',')
1628 break;
1629 p = p1 + 1;
1630 }
1631 return mask;
1632}
ea041c0e 1633
7501267e
FB
1634void cpu_abort(CPUState *env, const char *fmt, ...)
1635{
1636 va_list ap;
493ae1f0 1637 va_list ap2;
7501267e
FB
1638
1639 va_start(ap, fmt);
493ae1f0 1640 va_copy(ap2, ap);
7501267e
FB
1641 fprintf(stderr, "qemu: fatal: ");
1642 vfprintf(stderr, fmt, ap);
1643 fprintf(stderr, "\n");
1644#ifdef TARGET_I386
7fe48483
FB
1645 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1646#else
1647 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1648#endif
93fcfe39
AL
1649 if (qemu_log_enabled()) {
1650 qemu_log("qemu: fatal: ");
1651 qemu_log_vprintf(fmt, ap2);
1652 qemu_log("\n");
f9373291 1653#ifdef TARGET_I386
93fcfe39 1654 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1655#else
93fcfe39 1656 log_cpu_state(env, 0);
f9373291 1657#endif
31b1a7b4 1658 qemu_log_flush();
93fcfe39 1659 qemu_log_close();
924edcae 1660 }
493ae1f0 1661 va_end(ap2);
f9373291 1662 va_end(ap);
7501267e
FB
1663 abort();
1664}
1665
c5be9f08
TS
1666CPUState *cpu_copy(CPUState *env)
1667{
01ba9816 1668 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1669 CPUState *next_cpu = new_env->next_cpu;
1670 int cpu_index = new_env->cpu_index;
5a38f081
AL
1671#if defined(TARGET_HAS_ICE)
1672 CPUBreakpoint *bp;
1673 CPUWatchpoint *wp;
1674#endif
1675
c5be9f08 1676 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1677
1678 /* Preserve chaining and index. */
c5be9f08
TS
1679 new_env->next_cpu = next_cpu;
1680 new_env->cpu_index = cpu_index;
5a38f081
AL
1681
1682 /* Clone all break/watchpoints.
1683 Note: Once we support ptrace with hw-debug register access, make sure
1684 BP_CPU break/watchpoints are handled correctly on clone. */
1685 TAILQ_INIT(&env->breakpoints);
1686 TAILQ_INIT(&env->watchpoints);
1687#if defined(TARGET_HAS_ICE)
1688 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1689 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1690 }
1691 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1692 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1693 wp->flags, NULL);
1694 }
1695#endif
1696
c5be9f08
TS
1697 return new_env;
1698}
1699
0124311e
FB
1700#if !defined(CONFIG_USER_ONLY)
1701
5c751e99
EI
1702static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1703{
1704 unsigned int i;
1705
1706 /* Discard jump cache entries for any tb which might potentially
1707 overlap the flushed page. */
1708 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1709 memset (&env->tb_jmp_cache[i], 0,
1710 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1711
1712 i = tb_jmp_cache_hash_page(addr);
1713 memset (&env->tb_jmp_cache[i], 0,
1714 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1715}
1716
ee8b7021
FB
1717/* NOTE: if flush_global is true, also flush global entries (not
1718 implemented yet) */
1719void tlb_flush(CPUState *env, int flush_global)
33417e70 1720{
33417e70 1721 int i;
0124311e 1722
9fa3e853
FB
1723#if defined(DEBUG_TLB)
1724 printf("tlb_flush:\n");
1725#endif
0124311e
FB
1726 /* must reset current TB so that interrupts cannot modify the
1727 links while we are modifying them */
1728 env->current_tb = NULL;
1729
33417e70 1730 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1731 env->tlb_table[0][i].addr_read = -1;
1732 env->tlb_table[0][i].addr_write = -1;
1733 env->tlb_table[0][i].addr_code = -1;
1734 env->tlb_table[1][i].addr_read = -1;
1735 env->tlb_table[1][i].addr_write = -1;
1736 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1737#if (NB_MMU_MODES >= 3)
1738 env->tlb_table[2][i].addr_read = -1;
1739 env->tlb_table[2][i].addr_write = -1;
1740 env->tlb_table[2][i].addr_code = -1;
1741#if (NB_MMU_MODES == 4)
1742 env->tlb_table[3][i].addr_read = -1;
1743 env->tlb_table[3][i].addr_write = -1;
1744 env->tlb_table[3][i].addr_code = -1;
1745#endif
1746#endif
33417e70 1747 }
9fa3e853 1748
8a40a180 1749 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1750
0a962c02
FB
1751#ifdef USE_KQEMU
1752 if (env->kqemu_enabled) {
1753 kqemu_flush(env, flush_global);
1754 }
9fa3e853 1755#endif
e3db7226 1756 tlb_flush_count++;
33417e70
FB
1757}
1758
274da6b2 1759static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1760{
5fafdf24 1761 if (addr == (tlb_entry->addr_read &
84b7b8e7 1762 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1763 addr == (tlb_entry->addr_write &
84b7b8e7 1764 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1765 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1766 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1767 tlb_entry->addr_read = -1;
1768 tlb_entry->addr_write = -1;
1769 tlb_entry->addr_code = -1;
1770 }
61382a50
FB
1771}
1772
2e12669a 1773void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1774{
8a40a180 1775 int i;
0124311e 1776
9fa3e853 1777#if defined(DEBUG_TLB)
108c49b8 1778 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1779#endif
0124311e
FB
1780 /* must reset current TB so that interrupts cannot modify the
1781 links while we are modifying them */
1782 env->current_tb = NULL;
61382a50
FB
1783
1784 addr &= TARGET_PAGE_MASK;
1785 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1786 tlb_flush_entry(&env->tlb_table[0][i], addr);
1787 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1788#if (NB_MMU_MODES >= 3)
1789 tlb_flush_entry(&env->tlb_table[2][i], addr);
1790#if (NB_MMU_MODES == 4)
1791 tlb_flush_entry(&env->tlb_table[3][i], addr);
1792#endif
1793#endif
0124311e 1794
5c751e99 1795 tlb_flush_jmp_cache(env, addr);
9fa3e853 1796
0a962c02
FB
1797#ifdef USE_KQEMU
1798 if (env->kqemu_enabled) {
1799 kqemu_flush_page(env, addr);
1800 }
1801#endif
9fa3e853
FB
1802}
1803
9fa3e853
FB
1804/* update the TLBs so that writes to code in the virtual page 'addr'
1805 can be detected */
6a00d601 1806static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1807{
5fafdf24 1808 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1809 ram_addr + TARGET_PAGE_SIZE,
1810 CODE_DIRTY_FLAG);
9fa3e853
FB
1811}
1812
9fa3e853 1813/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1814 tested for self modifying code */
5fafdf24 1815static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1816 target_ulong vaddr)
9fa3e853 1817{
3a7d929e 1818 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1819}
1820
5fafdf24 1821static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1822 unsigned long start, unsigned long length)
1823{
1824 unsigned long addr;
84b7b8e7
FB
1825 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1826 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1827 if ((addr - start) < length) {
0f459d16 1828 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1829 }
1830 }
1831}
1832
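/* Dirty-page tracking overview: phys_ram_dirty keeps one byte of flag bits
   per target page (CODE_DIRTY_FLAG among them).  Clearing flags below also
   re-arms the matching write TLB entries with TLB_NOTDIRTY, so the next
   guest write to such a page is routed through the notdirty_mem_write*
   handlers further down in this file, which set the flags again. */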
3a7d929e 1833void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1834 int dirty_flags)
1ccde1cb
FB
1835{
1836 CPUState *env;
4f2ac237 1837 unsigned long length, start1;
0a962c02
FB
1838 int i, mask, len;
1839 uint8_t *p;
1ccde1cb
FB
1840
1841 start &= TARGET_PAGE_MASK;
1842 end = TARGET_PAGE_ALIGN(end);
1843
1844 length = end - start;
1845 if (length == 0)
1846 return;
0a962c02 1847 len = length >> TARGET_PAGE_BITS;
3a7d929e 1848#ifdef USE_KQEMU
6a00d601
FB
1849 /* XXX: should not depend on cpu context */
1850 env = first_cpu;
3a7d929e 1851 if (env->kqemu_enabled) {
f23db169
FB
1852 ram_addr_t addr;
1853 addr = start;
1854 for(i = 0; i < len; i++) {
1855 kqemu_set_notdirty(env, addr);
1856 addr += TARGET_PAGE_SIZE;
1857 }
3a7d929e
FB
1858 }
1859#endif
f23db169
FB
1860 mask = ~dirty_flags;
1861 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1862 for(i = 0; i < len; i++)
1863 p[i] &= mask;
1864
1ccde1cb
FB
1865 /* we modify the TLB cache so that the dirty bit will be set again
1866 when accessing the range */
59817ccb 1867 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1868 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1869 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1870 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1871 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1872 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1873#if (NB_MMU_MODES >= 3)
1874 for(i = 0; i < CPU_TLB_SIZE; i++)
1875 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1876#if (NB_MMU_MODES == 4)
1877 for(i = 0; i < CPU_TLB_SIZE; i++)
1878 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1879#endif
1880#endif
6a00d601 1881 }
1ccde1cb
FB
1882}
1883
74576198
AL
1884int cpu_physical_memory_set_dirty_tracking(int enable)
1885{
1886 in_migration = enable;
1887 return 0;
1888}
1889
1890int cpu_physical_memory_get_dirty_tracking(void)
1891{
1892 return in_migration;
1893}
1894
2bec46dc
AL
1895void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1896{
1897 if (kvm_enabled())
1898 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1899}
1900
3a7d929e
FB
1901static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1902{
1903 ram_addr_t ram_addr;
1904
84b7b8e7 1905 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1906 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1907 tlb_entry->addend - (unsigned long)phys_ram_base;
1908 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1909 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1910 }
1911 }
1912}
1913
1914/* update the TLB according to the current state of the dirty bits */
1915void cpu_tlb_update_dirty(CPUState *env)
1916{
1917 int i;
1918 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1919 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1920 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1921 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1922#if (NB_MMU_MODES >= 3)
1923 for(i = 0; i < CPU_TLB_SIZE; i++)
1924 tlb_update_dirty(&env->tlb_table[2][i]);
1925#if (NB_MMU_MODES == 4)
1926 for(i = 0; i < CPU_TLB_SIZE; i++)
1927 tlb_update_dirty(&env->tlb_table[3][i]);
1928#endif
1929#endif
3a7d929e
FB
1930}
1931
0f459d16 1932static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1933{
0f459d16
PB
1934 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1935 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1936}
1937
0f459d16
PB
1938/* update the TLB corresponding to virtual page vaddr
1939 so that it is no longer dirty */
1940static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1941{
1ccde1cb
FB
1942 int i;
1943
0f459d16 1944 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1945 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1946 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1947 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1948#if (NB_MMU_MODES >= 3)
0f459d16 1949 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1950#if (NB_MMU_MODES == 4)
0f459d16 1951 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1952#endif
1953#endif
9fa3e853
FB
1954}
1955
59817ccb
FB
1956/* add a new TLB entry. At most one entry for a given virtual address
1957 is permitted. Return 0 if OK or 2 if the page could not be mapped
1958 (can only happen in non SOFTMMU mode for I/O pages or pages
1959 conflicting with the host address space). */
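/* Summary of the TLB entry built below: for RAM-backed pages, 'addend' is
   chosen so that host_ptr == guest_vaddr + addend, and the iotlb value is
   derived from the ram address (or'ed with IO_MEM_NOTDIRTY or IO_MEM_ROM
   as appropriate).  For MMIO pages, the iotlb value is derived from the
   I/O handler index plus the region offset, and TLB_MMIO is set in the
   stored address so every access takes the slow path.  Pages containing a
   watchpoint are likewise forced through the slow path via TLB_MMIO. */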
5fafdf24
TS
1960int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1961 target_phys_addr_t paddr, int prot,
6ebbf390 1962 int mmu_idx, int is_softmmu)
9fa3e853 1963{
92e873b9 1964 PhysPageDesc *p;
4f2ac237 1965 unsigned long pd;
9fa3e853 1966 unsigned int index;
4f2ac237 1967 target_ulong address;
0f459d16 1968 target_ulong code_address;
108c49b8 1969 target_phys_addr_t addend;
9fa3e853 1970 int ret;
84b7b8e7 1971 CPUTLBEntry *te;
a1d1bb31 1972 CPUWatchpoint *wp;
0f459d16 1973 target_phys_addr_t iotlb;
9fa3e853 1974
92e873b9 1975 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1976 if (!p) {
1977 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1978 } else {
1979 pd = p->phys_offset;
9fa3e853
FB
1980 }
1981#if defined(DEBUG_TLB)
6ebbf390
JM
1982 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1983 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1984#endif
1985
1986 ret = 0;
0f459d16
PB
1987 address = vaddr;
1988 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1989 /* IO memory case (romd handled later) */
1990 address |= TLB_MMIO;
1991 }
1992 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1993 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1994 /* Normal RAM. */
1995 iotlb = pd & TARGET_PAGE_MASK;
1996 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1997 iotlb |= IO_MEM_NOTDIRTY;
1998 else
1999 iotlb |= IO_MEM_ROM;
2000 } else {
2001         /* IO handlers are currently passed a physical address.
2002 It would be nice to pass an offset from the base address
2003 of that region. This would avoid having to special case RAM,
2004 and avoid full address decoding in every device.
2005 We can't use the high bits of pd for this because
2006 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
2007 iotlb = (pd & ~TARGET_PAGE_MASK);
2008 if (p) {
8da3ff18
PB
2009 iotlb += p->region_offset;
2010 } else {
2011 iotlb += paddr;
2012 }
0f459d16
PB
2013 }
2014
2015 code_address = address;
2016 /* Make accesses to pages with watchpoints go via the
2017 watchpoint trap routines. */
c0ce998e 2018 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2019 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2020 iotlb = io_mem_watch + paddr;
2021 /* TODO: The memory case can be optimized by not trapping
2022 reads of pages with a write breakpoint. */
2023 address |= TLB_MMIO;
6658ffb8 2024 }
0f459d16 2025 }
d79acba4 2026
0f459d16
PB
2027 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2028 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2029 te = &env->tlb_table[mmu_idx][index];
2030 te->addend = addend - vaddr;
2031 if (prot & PAGE_READ) {
2032 te->addr_read = address;
2033 } else {
2034 te->addr_read = -1;
2035 }
5c751e99 2036
0f459d16
PB
2037 if (prot & PAGE_EXEC) {
2038 te->addr_code = code_address;
2039 } else {
2040 te->addr_code = -1;
2041 }
2042 if (prot & PAGE_WRITE) {
2043 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2044 (pd & IO_MEM_ROMD)) {
2045 /* Write access calls the I/O callback. */
2046 te->addr_write = address | TLB_MMIO;
2047 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2048 !cpu_physical_memory_is_dirty(pd)) {
2049 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2050 } else {
0f459d16 2051 te->addr_write = address;
9fa3e853 2052 }
0f459d16
PB
2053 } else {
2054 te->addr_write = -1;
9fa3e853 2055 }
9fa3e853
FB
2056 return ret;
2057}
2058
0124311e
FB
2059#else
2060
ee8b7021 2061void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2062{
2063}
2064
2e12669a 2065void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2066{
2067}
2068
5fafdf24
TS
2069int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2070 target_phys_addr_t paddr, int prot,
6ebbf390 2071 int mmu_idx, int is_softmmu)
9fa3e853
FB
2072{
2073 return 0;
2074}
0124311e 2075
9fa3e853
FB
2076/* dump memory mappings */
2077void page_dump(FILE *f)
33417e70 2078{
9fa3e853
FB
2079 unsigned long start, end;
2080 int i, j, prot, prot1;
2081 PageDesc *p;
33417e70 2082
9fa3e853
FB
2083 fprintf(f, "%-8s %-8s %-8s %s\n",
2084 "start", "end", "size", "prot");
2085 start = -1;
2086 end = -1;
2087 prot = 0;
2088 for(i = 0; i <= L1_SIZE; i++) {
2089 if (i < L1_SIZE)
2090 p = l1_map[i];
2091 else
2092 p = NULL;
2093 for(j = 0;j < L2_SIZE; j++) {
2094 if (!p)
2095 prot1 = 0;
2096 else
2097 prot1 = p[j].flags;
2098 if (prot1 != prot) {
2099 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2100 if (start != -1) {
2101 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2102 start, end, end - start,
9fa3e853
FB
2103 prot & PAGE_READ ? 'r' : '-',
2104 prot & PAGE_WRITE ? 'w' : '-',
2105 prot & PAGE_EXEC ? 'x' : '-');
2106 }
2107 if (prot1 != 0)
2108 start = end;
2109 else
2110 start = -1;
2111 prot = prot1;
2112 }
2113 if (!p)
2114 break;
2115 }
33417e70 2116 }
33417e70
FB
2117}
2118
53a5960a 2119int page_get_flags(target_ulong address)
33417e70 2120{
9fa3e853
FB
2121 PageDesc *p;
2122
2123 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2124 if (!p)
9fa3e853
FB
2125 return 0;
2126 return p->flags;
2127}
2128
2129/* modify the flags of a page and invalidate the code if
2130   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2131 depending on PAGE_WRITE */
53a5960a 2132void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2133{
2134 PageDesc *p;
53a5960a 2135 target_ulong addr;
9fa3e853 2136
c8a706fe 2137 /* mmap_lock should already be held. */
9fa3e853
FB
2138 start = start & TARGET_PAGE_MASK;
2139 end = TARGET_PAGE_ALIGN(end);
2140 if (flags & PAGE_WRITE)
2141 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2142 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2143 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2144 /* We may be called for host regions that are outside guest
2145 address space. */
2146 if (!p)
2147 return;
9fa3e853
FB
2148 /* if the write protection is set, then we invalidate the code
2149 inside */
5fafdf24 2150 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2151 (flags & PAGE_WRITE) &&
2152 p->first_tb) {
d720b93d 2153 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2154 }
2155 p->flags = flags;
2156 }
33417e70
FB
2157}
2158
3d97b40b
TS
2159int page_check_range(target_ulong start, target_ulong len, int flags)
2160{
2161 PageDesc *p;
2162 target_ulong end;
2163 target_ulong addr;
2164
55f280c9
AZ
2165 if (start + len < start)
2166 /* we've wrapped around */
2167 return -1;
2168
3d97b40b
TS
2169     end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2170 start = start & TARGET_PAGE_MASK;
2171
3d97b40b
TS
2172 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2173 p = page_find(addr >> TARGET_PAGE_BITS);
2174 if( !p )
2175 return -1;
2176 if( !(p->flags & PAGE_VALID) )
2177 return -1;
2178
dae3270c 2179 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2180 return -1;
dae3270c
FB
2181 if (flags & PAGE_WRITE) {
2182 if (!(p->flags & PAGE_WRITE_ORG))
2183 return -1;
2184 /* unprotect the page if it was put read-only because it
2185 contains translated code */
2186 if (!(p->flags & PAGE_WRITE)) {
2187 if (!page_unprotect(addr, 0, NULL))
2188 return -1;
2189 }
2191 }
3d97b40b
TS
2192 }
2193 return 0;
2194}
2195
9fa3e853
FB
2196/* called from signal handler: invalidate the code and unprotect the
2197    page. Return TRUE if the fault was successfully handled. */
53a5960a 2198int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2199{
2200 unsigned int page_index, prot, pindex;
2201 PageDesc *p, *p1;
53a5960a 2202 target_ulong host_start, host_end, addr;
9fa3e853 2203
c8a706fe
PB
2204 /* Technically this isn't safe inside a signal handler. However we
2205 know this only ever happens in a synchronous SEGV handler, so in
2206 practice it seems to be ok. */
2207 mmap_lock();
2208
83fb7adf 2209 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2210 page_index = host_start >> TARGET_PAGE_BITS;
2211 p1 = page_find(page_index);
c8a706fe
PB
2212 if (!p1) {
2213 mmap_unlock();
9fa3e853 2214 return 0;
c8a706fe 2215 }
83fb7adf 2216 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2217 p = p1;
2218 prot = 0;
2219 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2220 prot |= p->flags;
2221 p++;
2222 }
2223 /* if the page was really writable, then we change its
2224 protection back to writable */
2225 if (prot & PAGE_WRITE_ORG) {
2226 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2227 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2228 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2229 (prot & PAGE_BITS) | PAGE_WRITE);
2230 p1[pindex].flags |= PAGE_WRITE;
2231 /* and since the content will be modified, we must invalidate
2232 the corresponding translated code. */
d720b93d 2233 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2234#ifdef DEBUG_TB_CHECK
2235 tb_invalidate_check(address);
2236#endif
c8a706fe 2237 mmap_unlock();
9fa3e853
FB
2238 return 1;
2239 }
2240 }
c8a706fe 2241 mmap_unlock();
9fa3e853
FB
2242 return 0;
2243}
2244
6a00d601
FB
2245static inline void tlb_set_dirty(CPUState *env,
2246 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2247{
2248}
9fa3e853
FB
2249#endif /* defined(CONFIG_USER_ONLY) */
2250
e2eef170 2251#if !defined(CONFIG_USER_ONLY)
8da3ff18 2252
db7b5426 2253static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2254 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2255static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2256 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
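/* CHECK_SUBPAGE computes, for the target page containing 'addr', the byte
   range [start_addr2, end_addr2] that is covered by the registration
   [start_addr, start_addr + orig_size), and sets need_subpage when that
   range does not span the whole page, i.e. when the page has to be split
   into sub-page regions. */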
2257#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2258 need_subpage) \
2259 do { \
2260 if (addr > start_addr) \
2261 start_addr2 = 0; \
2262 else { \
2263 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2264 if (start_addr2 > 0) \
2265 need_subpage = 1; \
2266 } \
2267 \
49e9fba2 2268 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2269 end_addr2 = TARGET_PAGE_SIZE - 1; \
2270 else { \
2271 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2272 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2273 need_subpage = 1; \
2274 } \
2275 } while (0)
2276
33417e70
FB
2277/* register physical memory. 'size' must be a multiple of the target
2278 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2279 io memory page. The address used when calling the IO function is
2280 the offset from the start of the region, plus region_offset. Both
2281    start_addr and region_offset are rounded down to a page boundary
2282 before calculating this offset. This should not be a problem unless
2283 the low bits of start_addr and region_offset differ. */
2284void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2285 ram_addr_t size,
2286 ram_addr_t phys_offset,
2287 ram_addr_t region_offset)
33417e70 2288{
108c49b8 2289 target_phys_addr_t addr, end_addr;
92e873b9 2290 PhysPageDesc *p;
9d42037b 2291 CPUState *env;
00f82b8a 2292 ram_addr_t orig_size = size;
db7b5426 2293 void *subpage;
33417e70 2294
da260249
FB
2295#ifdef USE_KQEMU
2296 /* XXX: should not depend on cpu context */
2297 env = first_cpu;
2298 if (env->kqemu_enabled) {
2299 kqemu_set_phys_mem(start_addr, size, phys_offset);
2300 }
2301#endif
7ba1e619
AL
2302 if (kvm_enabled())
2303 kvm_set_phys_mem(start_addr, size, phys_offset);
2304
67c4d23c
PB
2305 if (phys_offset == IO_MEM_UNASSIGNED) {
2306 region_offset = start_addr;
2307 }
8da3ff18 2308 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2309 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2310 end_addr = start_addr + (target_phys_addr_t)size;
2311 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2312 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2313 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2314 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2315 target_phys_addr_t start_addr2, end_addr2;
2316 int need_subpage = 0;
2317
2318 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2319 need_subpage);
4254fab8 2320 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2321 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2322 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2323 &p->phys_offset, orig_memory,
2324 p->region_offset);
db7b5426
BS
2325 } else {
2326 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2327 >> IO_MEM_SHIFT];
2328 }
8da3ff18
PB
2329 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2330 region_offset);
2331 p->region_offset = 0;
db7b5426
BS
2332 } else {
2333 p->phys_offset = phys_offset;
2334 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2335 (phys_offset & IO_MEM_ROMD))
2336 phys_offset += TARGET_PAGE_SIZE;
2337 }
2338 } else {
2339 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2340 p->phys_offset = phys_offset;
8da3ff18 2341 p->region_offset = region_offset;
db7b5426 2342 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2343 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2344 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2345 } else {
db7b5426
BS
2346 target_phys_addr_t start_addr2, end_addr2;
2347 int need_subpage = 0;
2348
2349 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2350 end_addr2, need_subpage);
2351
4254fab8 2352 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2353 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2354 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2355 addr & TARGET_PAGE_MASK);
db7b5426 2356 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2357 phys_offset, region_offset);
2358 p->region_offset = 0;
db7b5426
BS
2359 }
2360 }
2361 }
8da3ff18 2362 region_offset += TARGET_PAGE_SIZE;
33417e70 2363 }
3b46e624 2364
9d42037b
FB
2365 /* since each CPU stores ram addresses in its TLB cache, we must
2366 reset the modified entries */
2367 /* XXX: slow ! */
2368 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2369 tlb_flush(env, 1);
2370 }
33417e70
FB
2371}
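/* Illustrative sketch (not part of the original file): a board model
   typically allocates guest RAM with qemu_ram_alloc() and registers it,
   then maps an MMIO region using an index obtained from
   cpu_register_io_memory().  'ram_size', 'mmio_base' and 'my_io_index'
   are placeholder names:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE,
                                    my_io_index);

   cpu_register_physical_memory() is the common wrapper that calls the
   _offset variant above with region_offset == 0. */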
2372
ba863458 2373/* XXX: temporary until new memory mapping API */
00f82b8a 2374ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2375{
2376 PhysPageDesc *p;
2377
2378 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2379 if (!p)
2380 return IO_MEM_UNASSIGNED;
2381 return p->phys_offset;
2382}
2383
f65ed4c1
AL
2384void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2385{
2386 if (kvm_enabled())
2387 kvm_coalesce_mmio_region(addr, size);
2388}
2389
2390void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2391{
2392 if (kvm_enabled())
2393 kvm_uncoalesce_mmio_region(addr, size);
2394}
2395
e9a1ab19 2396/* XXX: better than nothing */
00f82b8a 2397ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2398{
2399 ram_addr_t addr;
7fb4fdcf 2400 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2401 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2402 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2403 abort();
2404 }
2405 addr = phys_ram_alloc_offset;
2406 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2407 return addr;
2408}
2409
2410void qemu_ram_free(ram_addr_t addr)
2411{
2412}
2413
a4193c8a 2414static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2415{
67d3b957 2416#ifdef DEBUG_UNASSIGNED
ab3d1727 2417 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2418#endif
0a6f8a6d 2419#if defined(TARGET_SPARC)
e18231a3
BS
2420 do_unassigned_access(addr, 0, 0, 0, 1);
2421#endif
2422 return 0;
2423}
2424
2425static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2426{
2427#ifdef DEBUG_UNASSIGNED
2428 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2429#endif
0a6f8a6d 2430#if defined(TARGET_SPARC)
e18231a3
BS
2431 do_unassigned_access(addr, 0, 0, 0, 2);
2432#endif
2433 return 0;
2434}
2435
2436static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2437{
2438#ifdef DEBUG_UNASSIGNED
2439 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2440#endif
0a6f8a6d 2441#if defined(TARGET_SPARC)
e18231a3 2442 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2443#endif
33417e70
FB
2444 return 0;
2445}
2446
a4193c8a 2447static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2448{
67d3b957 2449#ifdef DEBUG_UNASSIGNED
ab3d1727 2450 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2451#endif
0a6f8a6d 2452#if defined(TARGET_SPARC)
e18231a3
BS
2453 do_unassigned_access(addr, 1, 0, 0, 1);
2454#endif
2455}
2456
2457static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2458{
2459#ifdef DEBUG_UNASSIGNED
2460 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2461#endif
0a6f8a6d 2462#if defined(TARGET_SPARC)
e18231a3
BS
2463 do_unassigned_access(addr, 1, 0, 0, 2);
2464#endif
2465}
2466
2467static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2468{
2469#ifdef DEBUG_UNASSIGNED
2470 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2471#endif
0a6f8a6d 2472#if defined(TARGET_SPARC)
e18231a3 2473 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2474#endif
33417e70
FB
2475}
2476
2477static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2478 unassigned_mem_readb,
e18231a3
BS
2479 unassigned_mem_readw,
2480 unassigned_mem_readl,
33417e70
FB
2481};
2482
2483static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2484 unassigned_mem_writeb,
e18231a3
BS
2485 unassigned_mem_writew,
2486 unassigned_mem_writel,
33417e70
FB
2487};
2488
0f459d16
PB
2489static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2490 uint32_t val)
9fa3e853 2491{
3a7d929e 2492 int dirty_flags;
3a7d929e
FB
2493 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2494 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2495#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2496 tb_invalidate_phys_page_fast(ram_addr, 1);
2497 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2498#endif
3a7d929e 2499 }
0f459d16 2500 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2501#ifdef USE_KQEMU
2502 if (cpu_single_env->kqemu_enabled &&
2503 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2504 kqemu_modify_page(cpu_single_env, ram_addr);
2505#endif
f23db169
FB
2506 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2507 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2508 /* we remove the notdirty callback only if the code has been
2509 flushed */
2510 if (dirty_flags == 0xff)
2e70f6ef 2511 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2512}
2513
0f459d16
PB
2514static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2515 uint32_t val)
9fa3e853 2516{
3a7d929e 2517 int dirty_flags;
3a7d929e
FB
2518 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2519 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2520#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2521 tb_invalidate_phys_page_fast(ram_addr, 2);
2522 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2523#endif
3a7d929e 2524 }
0f459d16 2525 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2526#ifdef USE_KQEMU
2527 if (cpu_single_env->kqemu_enabled &&
2528 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2529 kqemu_modify_page(cpu_single_env, ram_addr);
2530#endif
f23db169
FB
2531 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2532 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2533 /* we remove the notdirty callback only if the code has been
2534 flushed */
2535 if (dirty_flags == 0xff)
2e70f6ef 2536 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2537}
2538
0f459d16
PB
2539static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2540 uint32_t val)
9fa3e853 2541{
3a7d929e 2542 int dirty_flags;
3a7d929e
FB
2543 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2544 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2545#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2546 tb_invalidate_phys_page_fast(ram_addr, 4);
2547 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2548#endif
3a7d929e 2549 }
0f459d16 2550 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2551#ifdef USE_KQEMU
2552 if (cpu_single_env->kqemu_enabled &&
2553 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2554 kqemu_modify_page(cpu_single_env, ram_addr);
2555#endif
f23db169
FB
2556 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2557 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2558 /* we remove the notdirty callback only if the code has been
2559 flushed */
2560 if (dirty_flags == 0xff)
2e70f6ef 2561 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2562}
2563
3a7d929e 2564static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2565 NULL, /* never used */
2566 NULL, /* never used */
2567 NULL, /* never used */
2568};
2569
1ccde1cb
FB
2570static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2571 notdirty_mem_writeb,
2572 notdirty_mem_writew,
2573 notdirty_mem_writel,
2574};
2575
0f459d16 2576/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2577static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2578{
2579 CPUState *env = cpu_single_env;
06d55cc1
AL
2580 target_ulong pc, cs_base;
2581 TranslationBlock *tb;
0f459d16 2582 target_ulong vaddr;
a1d1bb31 2583 CPUWatchpoint *wp;
06d55cc1 2584 int cpu_flags;
0f459d16 2585
06d55cc1
AL
2586 if (env->watchpoint_hit) {
2587 /* We re-entered the check after replacing the TB. Now raise
2588          * the debug interrupt so that it will trigger after the
2589 * current instruction. */
2590 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2591 return;
2592 }
2e70f6ef 2593 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2594 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2595 if ((vaddr == (wp->vaddr & len_mask) ||
2596 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2597 wp->flags |= BP_WATCHPOINT_HIT;
2598 if (!env->watchpoint_hit) {
2599 env->watchpoint_hit = wp;
2600 tb = tb_find_pc(env->mem_io_pc);
2601 if (!tb) {
2602 cpu_abort(env, "check_watchpoint: could not find TB for "
2603 "pc=%p", (void *)env->mem_io_pc);
2604 }
2605 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2606 tb_phys_invalidate(tb, -1);
2607 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2608 env->exception_index = EXCP_DEBUG;
2609 } else {
2610 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2611 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2612 }
2613 cpu_resume_from_signal(env, NULL);
06d55cc1 2614 }
6e140f28
AL
2615 } else {
2616 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2617 }
2618 }
2619}
2620
6658ffb8
PB
2621/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2622 so these check for a hit then pass through to the normal out-of-line
2623 phys routines. */
2624static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2625{
b4051334 2626 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2627 return ldub_phys(addr);
2628}
2629
2630static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2631{
b4051334 2632 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2633 return lduw_phys(addr);
2634}
2635
2636static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2637{
b4051334 2638 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2639 return ldl_phys(addr);
2640}
2641
6658ffb8
PB
2642static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2643 uint32_t val)
2644{
b4051334 2645 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2646 stb_phys(addr, val);
2647}
2648
2649static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2650 uint32_t val)
2651{
b4051334 2652 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2653 stw_phys(addr, val);
2654}
2655
2656static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2657 uint32_t val)
2658{
b4051334 2659 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2660 stl_phys(addr, val);
2661}
2662
2663static CPUReadMemoryFunc *watch_mem_read[3] = {
2664 watch_mem_readb,
2665 watch_mem_readw,
2666 watch_mem_readl,
2667};
2668
2669static CPUWriteMemoryFunc *watch_mem_write[3] = {
2670 watch_mem_writeb,
2671 watch_mem_writew,
2672 watch_mem_writel,
2673};
6658ffb8 2674
db7b5426
BS
2675static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2676 unsigned int len)
2677{
db7b5426
BS
2678 uint32_t ret;
2679 unsigned int idx;
2680
8da3ff18 2681 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2682#if defined(DEBUG_SUBPAGE)
2683 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2684 mmio, len, addr, idx);
2685#endif
8da3ff18
PB
2686 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2687 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2688
2689 return ret;
2690}
2691
2692static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2693 uint32_t value, unsigned int len)
2694{
db7b5426
BS
2695 unsigned int idx;
2696
8da3ff18 2697 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2698#if defined(DEBUG_SUBPAGE)
2699 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2700 mmio, len, addr, idx, value);
2701#endif
8da3ff18
PB
2702 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2703 addr + mmio->region_offset[idx][1][len],
2704 value);
db7b5426
BS
2705}
2706
2707static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2708{
2709#if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2711#endif
2712
2713 return subpage_readlen(opaque, addr, 0);
2714}
2715
2716static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2717 uint32_t value)
2718{
2719#if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2721#endif
2722 subpage_writelen(opaque, addr, value, 0);
2723}
2724
2725static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2726{
2727#if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729#endif
2730
2731 return subpage_readlen(opaque, addr, 1);
2732}
2733
2734static void subpage_writew (void *opaque, target_phys_addr_t addr,
2735 uint32_t value)
2736{
2737#if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739#endif
2740 subpage_writelen(opaque, addr, value, 1);
2741}
2742
2743static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2744{
2745#if defined(DEBUG_SUBPAGE)
2746 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2747#endif
2748
2749 return subpage_readlen(opaque, addr, 2);
2750}
2751
2752static void subpage_writel (void *opaque,
2753 target_phys_addr_t addr, uint32_t value)
2754{
2755#if defined(DEBUG_SUBPAGE)
2756 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2757#endif
2758 subpage_writelen(opaque, addr, value, 2);
2759}
2760
2761static CPUReadMemoryFunc *subpage_read[] = {
2762 &subpage_readb,
2763 &subpage_readw,
2764 &subpage_readl,
2765};
2766
2767static CPUWriteMemoryFunc *subpage_write[] = {
2768 &subpage_writeb,
2769 &subpage_writew,
2770 &subpage_writel,
2771};
2772
2773static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2774 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2775{
2776 int idx, eidx;
4254fab8 2777 unsigned int i;
db7b5426
BS
2778
2779 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2780 return -1;
2781 idx = SUBPAGE_IDX(start);
2782 eidx = SUBPAGE_IDX(end);
2783#if defined(DEBUG_SUBPAGE)
2784 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2785 mmio, start, end, idx, eidx, memory);
2786#endif
2787 memory >>= IO_MEM_SHIFT;
2788 for (; idx <= eidx; idx++) {
4254fab8 2789 for (i = 0; i < 4; i++) {
3ee89922
BS
2790 if (io_mem_read[memory][i]) {
2791 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2792 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2793 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2794 }
2795 if (io_mem_write[memory][i]) {
2796 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2797 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2798 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2799 }
4254fab8 2800 }
db7b5426
BS
2801 }
2802
2803 return 0;
2804}
2805
00f82b8a 2806static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2807 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2808{
2809 subpage_t *mmio;
2810 int subpage_memory;
2811
2812 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2813
2814 mmio->base = base;
2815 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2816#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2817 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2818 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2819#endif
1eec614b
AL
2820 *phys = subpage_memory | IO_MEM_SUBPAGE;
2821 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2822 region_offset);
db7b5426
BS
2823
2824 return mmio;
2825}
2826
88715657
AL
2827static int get_free_io_mem_idx(void)
2828{
2829 int i;
2830
2831 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2832 if (!io_mem_used[i]) {
2833 io_mem_used[i] = 1;
2834 return i;
2835 }
2836
2837 return -1;
2838}
2839
33417e70
FB
2840static void io_mem_init(void)
2841{
88715657
AL
2842 int i;
2843
3a7d929e 2844 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2845 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2846 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2847 for (i=0; i<5; i++)
2848 io_mem_used[i] = 1;
1ccde1cb 2849
0f459d16 2850 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2851 watch_mem_write, NULL);
1ccde1cb 2852 /* alloc dirty bits array */
0a962c02 2853 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2854 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2855}
2856
2857/* mem_read and mem_write are arrays of functions containing the
2858 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2859 2). Functions can be omitted with a NULL function pointer. The
2860 registered functions may be modified dynamically later.
2861 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2862 modified. If it is zero, a new io zone is allocated. The return
2863 value can be used with cpu_register_physical_memory(). (-1) is
2864    returned on error. */
33417e70
FB
2865int cpu_register_io_memory(int io_index,
2866 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2867 CPUWriteMemoryFunc **mem_write,
2868 void *opaque)
33417e70 2869{
4254fab8 2870 int i, subwidth = 0;
33417e70
FB
2871
2872 if (io_index <= 0) {
88715657
AL
2873 io_index = get_free_io_mem_idx();
2874 if (io_index == -1)
2875 return io_index;
33417e70
FB
2876 } else {
2877 if (io_index >= IO_MEM_NB_ENTRIES)
2878 return -1;
2879 }
b5ff1b31 2880
33417e70 2881 for(i = 0;i < 3; i++) {
4254fab8
BS
2882 if (!mem_read[i] || !mem_write[i])
2883 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2884 io_mem_read[io_index][i] = mem_read[i];
2885 io_mem_write[io_index][i] = mem_write[i];
2886 }
a4193c8a 2887 io_mem_opaque[io_index] = opaque;
4254fab8 2888 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2889}
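/* Illustrative sketch (not part of the original file): a device model
   registers its byte/word/long callbacks and then maps the returned index
   with cpu_register_physical_memory().  'mydev_*', 'mmio_base' and 'dev'
   are placeholder names:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io_index = cpu_register_io_memory(0, mydev_read, mydev_write, dev);
       cpu_register_physical_memory(mmio_base, TARGET_PAGE_SIZE, io_index);

   Passing 0 as the first argument asks for a new slot; the return value
   already has IO_MEM_SHIFT applied, so it can be used directly as a
   phys_offset. */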
61382a50 2890
88715657
AL
2891void cpu_unregister_io_memory(int io_table_address)
2892{
2893 int i;
2894 int io_index = io_table_address >> IO_MEM_SHIFT;
2895
2896 for (i=0;i < 3; i++) {
2897 io_mem_read[io_index][i] = unassigned_mem_read[i];
2898 io_mem_write[io_index][i] = unassigned_mem_write[i];
2899 }
2900 io_mem_opaque[io_index] = NULL;
2901 io_mem_used[io_index] = 0;
2902}
2903
8926b517
FB
2904CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2905{
2906 return io_mem_write[io_index >> IO_MEM_SHIFT];
2907}
2908
2909CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2910{
2911 return io_mem_read[io_index >> IO_MEM_SHIFT];
2912}
2913
e2eef170
PB
2914#endif /* !defined(CONFIG_USER_ONLY) */
2915
13eb76e0
FB
2916/* physical memory access (slow version, mainly for debug) */
2917#if defined(CONFIG_USER_ONLY)
5fafdf24 2918void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2919 int len, int is_write)
2920{
2921 int l, flags;
2922 target_ulong page;
53a5960a 2923 void * p;
13eb76e0
FB
2924
2925 while (len > 0) {
2926 page = addr & TARGET_PAGE_MASK;
2927 l = (page + TARGET_PAGE_SIZE) - addr;
2928 if (l > len)
2929 l = len;
2930 flags = page_get_flags(page);
2931 if (!(flags & PAGE_VALID))
2932 return;
2933 if (is_write) {
2934 if (!(flags & PAGE_WRITE))
2935 return;
579a97f7 2936 /* XXX: this code should not depend on lock_user */
72fb7daa 2937 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2938 /* FIXME - should this return an error rather than just fail? */
2939 return;
72fb7daa
AJ
2940 memcpy(p, buf, l);
2941 unlock_user(p, addr, l);
13eb76e0
FB
2942 } else {
2943 if (!(flags & PAGE_READ))
2944 return;
579a97f7 2945 /* XXX: this code should not depend on lock_user */
72fb7daa 2946 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2947 /* FIXME - should this return an error rather than just fail? */
2948 return;
72fb7daa 2949 memcpy(buf, p, l);
5b257578 2950 unlock_user(p, addr, 0);
13eb76e0
FB
2951 }
2952 len -= l;
2953 buf += l;
2954 addr += l;
2955 }
2956}
8df1cd07 2957
13eb76e0 2958#else
5fafdf24 2959void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2960 int len, int is_write)
2961{
2962 int l, io_index;
2963 uint8_t *ptr;
2964 uint32_t val;
2e12669a
FB
2965 target_phys_addr_t page;
2966 unsigned long pd;
92e873b9 2967 PhysPageDesc *p;
3b46e624 2968
13eb76e0
FB
2969 while (len > 0) {
2970 page = addr & TARGET_PAGE_MASK;
2971 l = (page + TARGET_PAGE_SIZE) - addr;
2972 if (l > len)
2973 l = len;
92e873b9 2974 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2975 if (!p) {
2976 pd = IO_MEM_UNASSIGNED;
2977 } else {
2978 pd = p->phys_offset;
2979 }
3b46e624 2980
13eb76e0 2981 if (is_write) {
3a7d929e 2982 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2983 target_phys_addr_t addr1 = addr;
13eb76e0 2984 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2985 if (p)
6c2934db 2986 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2987 /* XXX: could force cpu_single_env to NULL to avoid
2988 potential bugs */
6c2934db 2989 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2990 /* 32 bit write access */
c27004ec 2991 val = ldl_p(buf);
6c2934db 2992 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2993 l = 4;
6c2934db 2994 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2995 /* 16 bit write access */
c27004ec 2996 val = lduw_p(buf);
6c2934db 2997 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2998 l = 2;
2999 } else {
1c213d19 3000 /* 8 bit write access */
c27004ec 3001 val = ldub_p(buf);
6c2934db 3002 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
3003 l = 1;
3004 }
3005 } else {
b448f2f3
FB
3006 unsigned long addr1;
3007 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 3008 /* RAM case */
b448f2f3 3009 ptr = phys_ram_base + addr1;
13eb76e0 3010 memcpy(ptr, buf, l);
3a7d929e
FB
3011 if (!cpu_physical_memory_is_dirty(addr1)) {
3012 /* invalidate code */
3013 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3014 /* set dirty bit */
5fafdf24 3015 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 3016 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3017 }
13eb76e0
FB
3018 }
3019 } else {
5fafdf24 3020 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3021 !(pd & IO_MEM_ROMD)) {
6c2934db 3022 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3023 /* I/O case */
3024 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3025 if (p)
6c2934db
AJ
3026 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3027 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3028 /* 32 bit read access */
6c2934db 3029 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3030 stl_p(buf, val);
13eb76e0 3031 l = 4;
6c2934db 3032 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3033 /* 16 bit read access */
6c2934db 3034 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3035 stw_p(buf, val);
13eb76e0
FB
3036 l = 2;
3037 } else {
1c213d19 3038 /* 8 bit read access */
6c2934db 3039 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3040 stb_p(buf, val);
13eb76e0
FB
3041 l = 1;
3042 }
3043 } else {
3044 /* RAM case */
5fafdf24 3045 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3046 (addr & ~TARGET_PAGE_MASK);
3047 memcpy(buf, ptr, l);
3048 }
3049 }
3050 len -= l;
3051 buf += l;
3052 addr += l;
3053 }
3054}
8df1cd07 3055
d0ecd2aa 3056/* used for ROM loading : can write in RAM and ROM */
5fafdf24 3057void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3058 const uint8_t *buf, int len)
3059{
3060 int l;
3061 uint8_t *ptr;
3062 target_phys_addr_t page;
3063 unsigned long pd;
3064 PhysPageDesc *p;
3b46e624 3065
d0ecd2aa
FB
3066 while (len > 0) {
3067 page = addr & TARGET_PAGE_MASK;
3068 l = (page + TARGET_PAGE_SIZE) - addr;
3069 if (l > len)
3070 l = len;
3071 p = phys_page_find(page >> TARGET_PAGE_BITS);
3072 if (!p) {
3073 pd = IO_MEM_UNASSIGNED;
3074 } else {
3075 pd = p->phys_offset;
3076 }
3b46e624 3077
d0ecd2aa 3078 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3079 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3080 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3081 /* do nothing */
3082 } else {
3083 unsigned long addr1;
3084 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3085 /* ROM/RAM case */
3086 ptr = phys_ram_base + addr1;
3087 memcpy(ptr, buf, l);
3088 }
3089 len -= l;
3090 buf += l;
3091 addr += l;
3092 }
3093}
3094
6d16c2f8
AL
3095typedef struct {
3096 void *buffer;
3097 target_phys_addr_t addr;
3098 target_phys_addr_t len;
3099} BounceBuffer;
3100
3101static BounceBuffer bounce;
3102
ba223c29
AL
3103typedef struct MapClient {
3104 void *opaque;
3105 void (*callback)(void *opaque);
3106 LIST_ENTRY(MapClient) link;
3107} MapClient;
3108
3109static LIST_HEAD(map_client_list, MapClient) map_client_list
3110 = LIST_HEAD_INITIALIZER(map_client_list);
3111
3112void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3113{
3114 MapClient *client = qemu_malloc(sizeof(*client));
3115
3116 client->opaque = opaque;
3117 client->callback = callback;
3118 LIST_INSERT_HEAD(&map_client_list, client, link);
3119 return client;
3120}
3121
3122void cpu_unregister_map_client(void *_client)
3123{
3124 MapClient *client = (MapClient *)_client;
3125
3126 LIST_REMOVE(client, link);
3127}
3128
3129static void cpu_notify_map_clients(void)
3130{
3131 MapClient *client;
3132
3133 while (!LIST_EMPTY(&map_client_list)) {
3134 client = LIST_FIRST(&map_client_list);
3135 client->callback(client->opaque);
3136 LIST_REMOVE(client, link);
3137 }
3138}
3139
6d16c2f8
AL
3140/* Map a physical memory region into a host virtual address.
3141 * May map a subset of the requested range, given by and returned in *plen.
3142 * May return NULL if resources needed to perform the mapping are exhausted.
3143 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3144 * Use cpu_register_map_client() to know when retrying the map operation is
3145 * likely to succeed.
6d16c2f8
AL
3146 */
3147void *cpu_physical_memory_map(target_phys_addr_t addr,
3148 target_phys_addr_t *plen,
3149 int is_write)
3150{
3151 target_phys_addr_t len = *plen;
3152 target_phys_addr_t done = 0;
3153 int l;
3154 uint8_t *ret = NULL;
3155 uint8_t *ptr;
3156 target_phys_addr_t page;
3157 unsigned long pd;
3158 PhysPageDesc *p;
3159 unsigned long addr1;
3160
3161 while (len > 0) {
3162 page = addr & TARGET_PAGE_MASK;
3163 l = (page + TARGET_PAGE_SIZE) - addr;
3164 if (l > len)
3165 l = len;
3166 p = phys_page_find(page >> TARGET_PAGE_BITS);
3167 if (!p) {
3168 pd = IO_MEM_UNASSIGNED;
3169 } else {
3170 pd = p->phys_offset;
3171 }
3172
3173 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3174 if (done || bounce.buffer) {
3175 break;
3176 }
3177 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3178 bounce.addr = addr;
3179 bounce.len = l;
3180 if (!is_write) {
3181 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3182 }
3183 ptr = bounce.buffer;
3184 } else {
3185 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3186 ptr = phys_ram_base + addr1;
3187 }
3188 if (!done) {
3189 ret = ptr;
3190 } else if (ret + done != ptr) {
3191 break;
3192 }
3193
3194 len -= l;
3195 addr += l;
3196 done += l;
3197 }
3198 *plen = done;
3199 return ret;
3200}
3201
3202/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3203 * Will also mark the memory as dirty if is_write == 1. access_len gives
3204 * the amount of memory that was actually read or written by the caller.
3205 */
3206void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3207 int is_write, target_phys_addr_t access_len)
3208{
3209 if (buffer != bounce.buffer) {
3210 if (is_write) {
3211 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3212 while (access_len) {
3213 unsigned l;
3214 l = TARGET_PAGE_SIZE;
3215 if (l > access_len)
3216 l = access_len;
3217 if (!cpu_physical_memory_is_dirty(addr1)) {
3218 /* invalidate code */
3219 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3220 /* set dirty bit */
3221 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3222 (0xff & ~CODE_DIRTY_FLAG);
3223 }
3224 addr1 += l;
3225 access_len -= l;
3226 }
3227 }
3228 return;
3229 }
3230 if (is_write) {
3231 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3232 }
3233 qemu_free(bounce.buffer);
3234 bounce.buffer = NULL;
ba223c29 3235 cpu_notify_map_clients();
6d16c2f8 3236}
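/* Illustrative sketch (not part of the original file) of the map/unmap
   pattern used for zero-copy DMA-style access; 'dev_dma_transfer' and
   'retry_cb' are placeholder names:

       target_phys_addr_t len = size;
       void *host = cpu_physical_memory_map(guest_addr, &len, 1);
       if (host) {
           dev_dma_transfer(host, len);   // use for reads OR writes, not both
           cpu_physical_memory_unmap(host, len, 1, len);
       } else {
           // mapping resources exhausted (e.g. bounce buffer busy):
           // register a callback and retry later
           cpu_register_map_client(opaque, retry_cb);
       }

   Only a single bounce buffer exists, so callers must handle a mapping
   that is shorter than requested (len is updated) or a NULL return. */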
d0ecd2aa 3237
8df1cd07
FB
3238/* warning: addr must be aligned */
3239uint32_t ldl_phys(target_phys_addr_t addr)
3240{
3241 int io_index;
3242 uint8_t *ptr;
3243 uint32_t val;
3244 unsigned long pd;
3245 PhysPageDesc *p;
3246
3247 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3248 if (!p) {
3249 pd = IO_MEM_UNASSIGNED;
3250 } else {
3251 pd = p->phys_offset;
3252 }
3b46e624 3253
5fafdf24 3254 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3255 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3256 /* I/O case */
3257 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3258 if (p)
3259 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3260 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3261 } else {
3262 /* RAM case */
5fafdf24 3263 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3264 (addr & ~TARGET_PAGE_MASK);
3265 val = ldl_p(ptr);
3266 }
3267 return val;
3268}
3269
84b7b8e7
FB
3270/* warning: addr must be aligned */
3271uint64_t ldq_phys(target_phys_addr_t addr)
3272{
3273 int io_index;
3274 uint8_t *ptr;
3275 uint64_t val;
3276 unsigned long pd;
3277 PhysPageDesc *p;
3278
3279 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3280 if (!p) {
3281 pd = IO_MEM_UNASSIGNED;
3282 } else {
3283 pd = p->phys_offset;
3284 }
3b46e624 3285
2a4188a3
FB
3286 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3287 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3288 /* I/O case */
3289 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3290 if (p)
3291 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3292#ifdef TARGET_WORDS_BIGENDIAN
3293 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3294 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3295#else
3296 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3297 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3298#endif
3299 } else {
3300 /* RAM case */
5fafdf24 3301 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3302 (addr & ~TARGET_PAGE_MASK);
3303 val = ldq_p(ptr);
3304 }
3305 return val;
3306}
3307
aab33094
FB
3308/* XXX: optimize */
3309uint32_t ldub_phys(target_phys_addr_t addr)
3310{
3311 uint8_t val;
3312 cpu_physical_memory_read(addr, &val, 1);
3313 return val;
3314}
3315
3316/* XXX: optimize */
3317uint32_t lduw_phys(target_phys_addr_t addr)
3318{
3319 uint16_t val;
3320 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3321 return tswap16(val);
3322}
3323
8df1cd07
FB
3324/* warning: addr must be aligned. The ram page is not masked as dirty
3325 and the code inside is not invalidated. It is useful if the dirty
3326 bits are used to track modified PTEs */
3327void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3328{
3329 int io_index;
3330 uint8_t *ptr;
3331 unsigned long pd;
3332 PhysPageDesc *p;
3333
3334 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3335 if (!p) {
3336 pd = IO_MEM_UNASSIGNED;
3337 } else {
3338 pd = p->phys_offset;
3339 }
3b46e624 3340
3a7d929e 3341 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3342 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3343 if (p)
3344 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3345 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3346 } else {
74576198
AL
3347 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3348 ptr = phys_ram_base + addr1;
8df1cd07 3349 stl_p(ptr, val);
74576198
AL
3350
3351 if (unlikely(in_migration)) {
3352 if (!cpu_physical_memory_is_dirty(addr1)) {
3353 /* invalidate code */
3354 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3355 /* set dirty bit */
3356 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3357 (0xff & ~CODE_DIRTY_FLAG);
3358 }
3359 }
8df1cd07
FB
3360 }
3361}
3362
bc98a7ef
JM
3363void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3364{
3365 int io_index;
3366 uint8_t *ptr;
3367 unsigned long pd;
3368 PhysPageDesc *p;
3369
3370 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3371 if (!p) {
3372 pd = IO_MEM_UNASSIGNED;
3373 } else {
3374 pd = p->phys_offset;
3375 }
3b46e624 3376
bc98a7ef
JM
3377 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3378 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3379 if (p)
3380 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3381#ifdef TARGET_WORDS_BIGENDIAN
3382 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3383 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3384#else
3385 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3386 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3387#endif
3388 } else {
5fafdf24 3389 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3390 (addr & ~TARGET_PAGE_MASK);
3391 stq_p(ptr, val);
3392 }
3393}
3394
8df1cd07 3395/* warning: addr must be aligned */
8df1cd07
FB
3396void stl_phys(target_phys_addr_t addr, uint32_t val)
3397{
3398 int io_index;
3399 uint8_t *ptr;
3400 unsigned long pd;
3401 PhysPageDesc *p;
3402
3403 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3404 if (!p) {
3405 pd = IO_MEM_UNASSIGNED;
3406 } else {
3407 pd = p->phys_offset;
3408 }
3b46e624 3409
3a7d929e 3410 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3411 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3412 if (p)
3413 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3414 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3415 } else {
3416 unsigned long addr1;
3417 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3418 /* RAM case */
3419 ptr = phys_ram_base + addr1;
3420 stl_p(ptr, val);
3a7d929e
FB
3421 if (!cpu_physical_memory_is_dirty(addr1)) {
3422 /* invalidate code */
3423 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3424 /* set dirty bit */
f23db169
FB
3425 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3426 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3427 }
8df1cd07
FB
3428 }
3429}
3430
aab33094
FB
3431/* XXX: optimize */
3432void stb_phys(target_phys_addr_t addr, uint32_t val)
3433{
3434 uint8_t v = val;
3435 cpu_physical_memory_write(addr, &v, 1);
3436}
3437
3438/* XXX: optimize */
3439void stw_phys(target_phys_addr_t addr, uint32_t val)
3440{
3441 uint16_t v = tswap16(val);
3442 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3443}
3444
3445/* XXX: optimize */
3446void stq_phys(target_phys_addr_t addr, uint64_t val)
3447{
3448 val = tswap64(val);
3449 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3450}
3451
13eb76e0
FB
3452#endif
3453
3454/* virtual memory access for debug */
5fafdf24 3455int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3456 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3457{
3458 int l;
9b3c35e0
JM
3459 target_phys_addr_t phys_addr;
3460 target_ulong page;
13eb76e0
FB
3461
3462 while (len > 0) {
3463 page = addr & TARGET_PAGE_MASK;
3464 phys_addr = cpu_get_phys_page_debug(env, page);
3465 /* if no physical page mapped, return an error */
3466 if (phys_addr == -1)
3467 return -1;
3468 l = (page + TARGET_PAGE_SIZE) - addr;
3469 if (l > len)
3470 l = len;
5fafdf24 3471 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3472 buf, l, is_write);
13eb76e0
FB
3473 len -= l;
3474 buf += l;
3475 addr += l;
3476 }
3477 return 0;
3478}
3479
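/* Usage sketch (hypothetical debugger hook): the gdb stub reads guest memory
   through cpu_memory_rw_debug() so that each access is translated page by page
   via cpu_get_phys_page_debug() above. */
static inline int debug_read_u32(CPUState *env, target_ulong vaddr, uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;                  /* no physical page mapped */
    *out = ldl_p(buf);              /* interpret in target byte order */
    return 0;
}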
2e70f6ef
PB
3480/* In deterministic execution mode, instructions performing device I/O
3481 must be at the end of the TB */
3482void cpu_io_recompile(CPUState *env, void *retaddr)
3483{
3484 TranslationBlock *tb;
3485 uint32_t n, cflags;
3486 target_ulong pc, cs_base;
3487 uint64_t flags;
3488
3489 tb = tb_find_pc((unsigned long)retaddr);
3490 if (!tb) {
3491 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3492 retaddr);
3493 }
3494 n = env->icount_decr.u16.low + tb->icount;
3495 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3496 /* Calculate how many instructions had been executed before the fault
bf20dc07 3497 occurred. */
2e70f6ef
PB
3498 n = n - env->icount_decr.u16.low;
3499 /* Generate a new TB ending on the I/O insn. */
3500 n++;
3501 /* On MIPS and SH, delay slot instructions can only be restarted if
3502 they were already the first instruction in the TB. If this is not
bf20dc07 3503 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3504 branch. */
3505#if defined(TARGET_MIPS)
3506 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3507 env->active_tc.PC -= 4;
3508 env->icount_decr.u16.low++;
3509 env->hflags &= ~MIPS_HFLAG_BMASK;
3510 }
3511#elif defined(TARGET_SH4)
3512 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3513 && n > 1) {
3514 env->pc -= 2;
3515 env->icount_decr.u16.low++;
3516 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3517 }
3518#endif
3519 /* This should never happen. */
3520 if (n > CF_COUNT_MASK)
3521 cpu_abort(env, "TB too big during recompile");
3522
3523 cflags = n | CF_LAST_IO;
3524 pc = tb->pc;
3525 cs_base = tb->cs_base;
3526 flags = tb->flags;
3527 tb_phys_invalidate(tb, -1);
3528 /* FIXME: In theory this could raise an exception. In practice
3529 we have already translated the block once so it's probably ok. */
3530 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3531 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3532 the first in the TB) then we end up generating a whole new TB and
3533 repeating the fault, which is horribly inefficient.
3534 Better would be to execute just this insn uncached, or generate a
3535 second new TB. */
3536 cpu_resume_from_signal(env, NULL);
3537}
3538
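/* Caller-side sketch (illustrative, simplified from the softmmu slow path;
   softmmu builds only): when icount is active and an I/O access is reached
   before it is allowed, the access helper bails out through cpu_io_recompile()
   with the host return address so the TB is regenerated ending on that insn. */
static uint32_t io_read_checked(CPUState *env, target_phys_addr_t physaddr,
                                target_ulong addr, void *retaddr)
{
    int index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);

    if (!can_do_io(env)) {
        cpu_io_recompile(env, retaddr);   /* does not return */
    }
    /* simplified: the real slow path adjusts the address within the page */
    return io_mem_read[index][2](io_mem_opaque[index], addr);
}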
e3db7226
FB
3539void dump_exec_info(FILE *f,
3540 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3541{
3542 int i, target_code_size, max_target_code_size;
3543 int direct_jmp_count, direct_jmp2_count, cross_page;
3544 TranslationBlock *tb;
3b46e624 3545
e3db7226
FB
3546 target_code_size = 0;
3547 max_target_code_size = 0;
3548 cross_page = 0;
3549 direct_jmp_count = 0;
3550 direct_jmp2_count = 0;
3551 for(i = 0; i < nb_tbs; i++) {
3552 tb = &tbs[i];
3553 target_code_size += tb->size;
3554 if (tb->size > max_target_code_size)
3555 max_target_code_size = tb->size;
3556 if (tb->page_addr[1] != -1)
3557 cross_page++;
3558 if (tb->tb_next_offset[0] != 0xffff) {
3559 direct_jmp_count++;
3560 if (tb->tb_next_offset[1] != 0xffff) {
3561 direct_jmp2_count++;
3562 }
3563 }
3564 }
3565 /* XXX: avoid using doubles? */
57fec1fe 3566 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3567 cpu_fprintf(f, "gen code size %ld/%ld\n",
3568 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3569 cpu_fprintf(f, "TB count %d/%d\n",
3570 nb_tbs, code_gen_max_blocks);
5fafdf24 3571 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3572 nb_tbs ? target_code_size / nb_tbs : 0,
3573 max_target_code_size);
5fafdf24 3574 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3575 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3576 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3577 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3578 cross_page,
e3db7226
FB
3579 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3580 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3581 direct_jmp_count,
e3db7226
FB
3582 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3583 direct_jmp2_count,
3584 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3585 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3586 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3587 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3588 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3589 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3590}
3591
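/* Usage sketch (hypothetical debug hook): as with cpu_dump_state(), the
   statistics above can be sent to any stream by passing an fprintf-style
   callback. */
static inline void dump_exec_info_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}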
5fafdf24 3592#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3593
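/* The four includes below instantiate the code-fetch ("_cmmu") accessors used
   by the translators to read guest instructions through the software TLB;
   roughly, SHIFT 0/1/2/3 expand to __ldb_cmmu(), __ldw_cmmu(), __ldl_cmmu()
   and __ldq_cmmu(), reached via the ldub_code()/lduw_code()/ldl_code()/
   ldq_code() helpers. (Name mapping inferred from the template conventions;
   a sketch, not a definitive list.) */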
3594#define MMUSUFFIX _cmmu
3595#define GETPC() NULL
3596#define env cpu_single_env
b769d8fe 3597#define SOFTMMU_CODE_ACCESS
61382a50
FB
3598
3599#define SHIFT 0
3600#include "softmmu_template.h"
3601
3602#define SHIFT 1
3603#include "softmmu_template.h"
3604
3605#define SHIFT 2
3606#include "softmmu_template.h"
3607
3608#define SHIFT 3
3609#include "softmmu_template.h"
3610
3611#undef env
3612
3613#endif