]> git.proxmox.com Git - qemu.git/blame - exec.c
Add release tag for 0.10.1 release
[qemu.git] / exec.c
CommitLineData
54936004 1/*
fd6ce8f6 2 * virtual page mapping and translated block handling
5fafdf24 3 *
54936004
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
fad6cb1a 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
54936004 19 */
67b915a5 20#include "config.h"
d5a8f07c 21#ifdef _WIN32
4fddf62a 22#define WIN32_LEAN_AND_MEAN
d5a8f07c
FB
23#include <windows.h>
24#else
a98d49b1 25#include <sys/types.h>
d5a8f07c
FB
26#include <sys/mman.h>
27#endif
54936004
FB
28#include <stdlib.h>
29#include <stdio.h>
30#include <stdarg.h>
31#include <string.h>
32#include <errno.h>
33#include <unistd.h>
34#include <inttypes.h>
35
6180a181
FB
36#include "cpu.h"
37#include "exec-all.h"
ca10f867 38#include "qemu-common.h"
b67d9a52 39#include "tcg.h"
b3c7724c 40#include "hw/hw.h"
74576198 41#include "osdep.h"
7ba1e619 42#include "kvm.h"
53a5960a
PB
43#if defined(CONFIG_USER_ONLY)
44#include <qemu.h>
45#endif
54936004 46
fd6ce8f6 47//#define DEBUG_TB_INVALIDATE
66e85a21 48//#define DEBUG_FLUSH
9fa3e853 49//#define DEBUG_TLB
67d3b957 50//#define DEBUG_UNASSIGNED
fd6ce8f6
FB
51
52/* make various TB consistency checks */
5fafdf24
TS
53//#define DEBUG_TB_CHECK
54//#define DEBUG_TLB_CHECK
fd6ce8f6 55
1196be37 56//#define DEBUG_IOPORT
db7b5426 57//#define DEBUG_SUBPAGE
1196be37 58
99773bd4
PB
59#if !defined(CONFIG_USER_ONLY)
60/* TB consistency checks only implemented for usermode emulation. */
61#undef DEBUG_TB_CHECK
62#endif
63
9fa3e853
FB
64#define SMC_BITMAP_USE_THRESHOLD 10
65
66#define MMAP_AREA_START 0x00000000
67#define MMAP_AREA_END 0xa8000000
fd6ce8f6 68
108c49b8
FB
69#if defined(TARGET_SPARC64)
70#define TARGET_PHYS_ADDR_SPACE_BITS 41
5dcb6b91
BS
71#elif defined(TARGET_SPARC)
72#define TARGET_PHYS_ADDR_SPACE_BITS 36
bedb69ea
JM
73#elif defined(TARGET_ALPHA)
74#define TARGET_PHYS_ADDR_SPACE_BITS 42
75#define TARGET_VIRT_ADDR_SPACE_BITS 42
108c49b8
FB
76#elif defined(TARGET_PPC64)
77#define TARGET_PHYS_ADDR_SPACE_BITS 42
00f82b8a
AJ
78#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79#define TARGET_PHYS_ADDR_SPACE_BITS 42
80#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81#define TARGET_PHYS_ADDR_SPACE_BITS 36
108c49b8
FB
82#else
83/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84#define TARGET_PHYS_ADDR_SPACE_BITS 32
85#endif
86
bdaf78e0 87static TranslationBlock *tbs;
26a5f13b 88int code_gen_max_blocks;
9fa3e853 89TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
bdaf78e0 90static int nb_tbs;
eb51d102
FB
91/* any access to the tbs or the page table must use this lock */
92spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
fd6ce8f6 93
141ac468
BS
94#if defined(__arm__) || defined(__sparc_v9__)
95/* The prologue must be reachable with a direct jump. ARM and Sparc64
96 have limited branch ranges (possibly also PPC) so place it in a
d03d860b
BS
97 section close to code segment. */
98#define code_gen_section \
99 __attribute__((__section__(".gen_code"))) \
100 __attribute__((aligned (32)))
101#else
102#define code_gen_section \
103 __attribute__((aligned (32)))
104#endif
105
106uint8_t code_gen_prologue[1024] code_gen_section;
bdaf78e0
BS
107static uint8_t *code_gen_buffer;
108static unsigned long code_gen_buffer_size;
26a5f13b 109/* threshold to flush the translated code buffer */
bdaf78e0 110static unsigned long code_gen_buffer_max_size;
fd6ce8f6
FB
111uint8_t *code_gen_ptr;
112
e2eef170 113#if !defined(CONFIG_USER_ONLY)
00f82b8a 114ram_addr_t phys_ram_size;
9fa3e853
FB
115int phys_ram_fd;
116uint8_t *phys_ram_base;
1ccde1cb 117uint8_t *phys_ram_dirty;
74576198 118static int in_migration;
e9a1ab19 119static ram_addr_t phys_ram_alloc_offset = 0;
e2eef170 120#endif
9fa3e853 121
6a00d601
FB
122CPUState *first_cpu;
123/* current CPU in the current thread. It is only valid inside
124 cpu_exec() */
5fafdf24 125CPUState *cpu_single_env;
2e70f6ef 126/* 0 = Do not count executed instructions.
bf20dc07 127 1 = Precise instruction counting.
2e70f6ef
PB
128 2 = Adaptive rate instruction counting. */
129int use_icount = 0;
130/* Current instruction counter. While executing translated code this may
131 include some instructions that have not yet been executed. */
132int64_t qemu_icount;
6a00d601 133
54936004 134typedef struct PageDesc {
92e873b9 135 /* list of TBs intersecting this ram page */
fd6ce8f6 136 TranslationBlock *first_tb;
9fa3e853
FB
137 /* in order to optimize self modifying code, we count the number
138 of lookups we do to a given page to use a bitmap */
139 unsigned int code_write_count;
140 uint8_t *code_bitmap;
141#if defined(CONFIG_USER_ONLY)
142 unsigned long flags;
143#endif
54936004
FB
144} PageDesc;
145
92e873b9 146typedef struct PhysPageDesc {
0f459d16 147 /* offset in host memory of the page + io_index in the low bits */
00f82b8a 148 ram_addr_t phys_offset;
8da3ff18 149 ram_addr_t region_offset;
92e873b9
FB
150} PhysPageDesc;
151
54936004 152#define L2_BITS 10
bedb69ea
JM
153#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154/* XXX: this is a temporary hack for alpha target.
155 * In the future, this is to be replaced by a multi-level table
156 * to actually be able to handle the complete 64 bits address space.
157 */
158#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159#else
03875444 160#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
bedb69ea 161#endif
54936004
FB
162
163#define L1_SIZE (1 << L1_BITS)
164#define L2_SIZE (1 << L2_BITS)
165
83fb7adf
FB
166unsigned long qemu_real_host_page_size;
167unsigned long qemu_host_page_bits;
168unsigned long qemu_host_page_size;
169unsigned long qemu_host_page_mask;
54936004 170
92e873b9 171/* XXX: for system emulation, it could just be an array */
54936004 172static PageDesc *l1_map[L1_SIZE];
bdaf78e0 173static PhysPageDesc **l1_phys_map;
54936004 174
e2eef170
PB
175#if !defined(CONFIG_USER_ONLY)
176static void io_mem_init(void);
177
33417e70 178/* io memory support */
33417e70
FB
179CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
a4193c8a 181void *io_mem_opaque[IO_MEM_NB_ENTRIES];
88715657 182char io_mem_used[IO_MEM_NB_ENTRIES];
6658ffb8
PB
183static int io_mem_watch;
184#endif
33417e70 185
34865134 186/* log support */
d9b630fd 187static const char *logfilename = "/tmp/qemu.log";
34865134
FB
188FILE *logfile;
189int loglevel;
e735b91c 190static int log_append = 0;
34865134 191
e3db7226
FB
192/* statistics */
193static int tlb_flush_count;
194static int tb_flush_count;
195static int tb_phys_invalidate_count;
196
db7b5426
BS
197#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198typedef struct subpage_t {
199 target_phys_addr_t base;
3ee89922
BS
200 CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201 CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202 void *opaque[TARGET_PAGE_SIZE][2][4];
8da3ff18 203 ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
db7b5426
BS
204} subpage_t;
205
7cb69cae
FB
206#ifdef _WIN32
207static void map_exec(void *addr, long size)
208{
209 DWORD old_protect;
210 VirtualProtect(addr, size,
211 PAGE_EXECUTE_READWRITE, &old_protect);
212
213}
214#else
215static void map_exec(void *addr, long size)
216{
4369415f 217 unsigned long start, end, page_size;
7cb69cae 218
4369415f 219 page_size = getpagesize();
7cb69cae 220 start = (unsigned long)addr;
4369415f 221 start &= ~(page_size - 1);
7cb69cae
FB
222
223 end = (unsigned long)addr + size;
4369415f
FB
224 end += page_size - 1;
225 end &= ~(page_size - 1);
7cb69cae
FB
226
227 mprotect((void *)start, end - start,
228 PROT_READ | PROT_WRITE | PROT_EXEC);
229}
230#endif
231
b346ff46 232static void page_init(void)
54936004 233{
83fb7adf 234 /* NOTE: we can always suppose that qemu_host_page_size >=
54936004 235 TARGET_PAGE_SIZE */
c2b48b69
AL
236#ifdef _WIN32
237 {
238 SYSTEM_INFO system_info;
239
240 GetSystemInfo(&system_info);
241 qemu_real_host_page_size = system_info.dwPageSize;
242 }
243#else
244 qemu_real_host_page_size = getpagesize();
245#endif
83fb7adf
FB
246 if (qemu_host_page_size == 0)
247 qemu_host_page_size = qemu_real_host_page_size;
248 if (qemu_host_page_size < TARGET_PAGE_SIZE)
249 qemu_host_page_size = TARGET_PAGE_SIZE;
250 qemu_host_page_bits = 0;
251 while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252 qemu_host_page_bits++;
253 qemu_host_page_mask = ~(qemu_host_page_size - 1);
108c49b8
FB
254 l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255 memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
50a9569b
AZ
256
257#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258 {
259 long long startaddr, endaddr;
260 FILE *f;
261 int n;
262
c8a706fe 263 mmap_lock();
0776590d 264 last_brk = (unsigned long)sbrk(0);
50a9569b
AZ
265 f = fopen("/proc/self/maps", "r");
266 if (f) {
267 do {
268 n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269 if (n == 2) {
e0b8d65a
BS
270 startaddr = MIN(startaddr,
271 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272 endaddr = MIN(endaddr,
273 (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
b5fc909e 274 page_set_flags(startaddr & TARGET_PAGE_MASK,
50a9569b
AZ
275 TARGET_PAGE_ALIGN(endaddr),
276 PAGE_RESERVED);
277 }
278 } while (!feof(f));
279 fclose(f);
280 }
c8a706fe 281 mmap_unlock();
50a9569b
AZ
282 }
283#endif
54936004
FB
284}
285
434929bf 286static inline PageDesc **page_l1_map(target_ulong index)
54936004 287{
17e2377a
PB
288#if TARGET_LONG_BITS > 32
289 /* Host memory outside guest VM. For 32-bit targets we have already
290 excluded high addresses. */
d8173e0f 291 if (index > ((target_ulong)L2_SIZE * L1_SIZE))
17e2377a
PB
292 return NULL;
293#endif
434929bf
AL
294 return &l1_map[index >> L2_BITS];
295}
296
297static inline PageDesc *page_find_alloc(target_ulong index)
298{
299 PageDesc **lp, *p;
300 lp = page_l1_map(index);
301 if (!lp)
302 return NULL;
303
54936004
FB
304 p = *lp;
305 if (!p) {
306 /* allocate if not found */
17e2377a 307#if defined(CONFIG_USER_ONLY)
17e2377a
PB
308 size_t len = sizeof(PageDesc) * L2_SIZE;
309 /* Don't use qemu_malloc because it may recurse. */
310 p = mmap(0, len, PROT_READ | PROT_WRITE,
311 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
54936004 312 *lp = p;
fb1c2cd7
AJ
313 if (h2g_valid(p)) {
314 unsigned long addr = h2g(p);
17e2377a
PB
315 page_set_flags(addr & TARGET_PAGE_MASK,
316 TARGET_PAGE_ALIGN(addr + len),
317 PAGE_RESERVED);
318 }
319#else
320 p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321 *lp = p;
322#endif
54936004
FB
323 }
324 return p + (index & (L2_SIZE - 1));
325}
326
00f82b8a 327static inline PageDesc *page_find(target_ulong index)
54936004 328{
434929bf
AL
329 PageDesc **lp, *p;
330 lp = page_l1_map(index);
331 if (!lp)
332 return NULL;
54936004 333
434929bf 334 p = *lp;
54936004
FB
335 if (!p)
336 return 0;
fd6ce8f6
FB
337 return p + (index & (L2_SIZE - 1));
338}
339
108c49b8 340static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
92e873b9 341{
108c49b8 342 void **lp, **p;
e3f4e2a4 343 PhysPageDesc *pd;
92e873b9 344
108c49b8
FB
345 p = (void **)l1_phys_map;
346#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347
348#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350#endif
351 lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
92e873b9
FB
352 p = *lp;
353 if (!p) {
354 /* allocate if not found */
108c49b8
FB
355 if (!alloc)
356 return NULL;
357 p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358 memset(p, 0, sizeof(void *) * L1_SIZE);
359 *lp = p;
360 }
361#endif
362 lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
e3f4e2a4
PB
363 pd = *lp;
364 if (!pd) {
365 int i;
108c49b8
FB
366 /* allocate if not found */
367 if (!alloc)
368 return NULL;
e3f4e2a4
PB
369 pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370 *lp = pd;
67c4d23c 371 for (i = 0; i < L2_SIZE; i++) {
e3f4e2a4 372 pd[i].phys_offset = IO_MEM_UNASSIGNED;
67c4d23c
PB
373 pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
374 }
92e873b9 375 }
e3f4e2a4 376 return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
92e873b9
FB
377}
378
108c49b8 379static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
92e873b9 380{
108c49b8 381 return phys_page_find_alloc(index, 0);
92e873b9
FB
382}
383
9fa3e853 384#if !defined(CONFIG_USER_ONLY)
6a00d601 385static void tlb_protect_code(ram_addr_t ram_addr);
5fafdf24 386static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 387 target_ulong vaddr);
c8a706fe
PB
388#define mmap_lock() do { } while(0)
389#define mmap_unlock() do { } while(0)
9fa3e853 390#endif
fd6ce8f6 391
4369415f
FB
392#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393
394#if defined(CONFIG_USER_ONLY)
395/* Currently it is not recommanded to allocate big chunks of data in
396 user mode. It will change when a dedicated libc will be used */
397#define USE_STATIC_CODE_GEN_BUFFER
398#endif
399
400#ifdef USE_STATIC_CODE_GEN_BUFFER
401static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402#endif
403
8fcd3692 404static void code_gen_alloc(unsigned long tb_size)
26a5f13b 405{
4369415f
FB
406#ifdef USE_STATIC_CODE_GEN_BUFFER
407 code_gen_buffer = static_code_gen_buffer;
408 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409 map_exec(code_gen_buffer, code_gen_buffer_size);
410#else
26a5f13b
FB
411 code_gen_buffer_size = tb_size;
412 if (code_gen_buffer_size == 0) {
4369415f
FB
413#if defined(CONFIG_USER_ONLY)
414 /* in user mode, phys_ram_size is not meaningful */
415 code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416#else
26a5f13b 417 /* XXX: needs ajustments */
174a9a1f 418 code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
4369415f 419#endif
26a5f13b
FB
420 }
421 if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422 code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423 /* The code gen buffer location may have constraints depending on
424 the host cpu and OS */
425#if defined(__linux__)
426 {
427 int flags;
141ac468
BS
428 void *start = NULL;
429
26a5f13b
FB
430 flags = MAP_PRIVATE | MAP_ANONYMOUS;
431#if defined(__x86_64__)
432 flags |= MAP_32BIT;
433 /* Cannot map more than that */
434 if (code_gen_buffer_size > (800 * 1024 * 1024))
435 code_gen_buffer_size = (800 * 1024 * 1024);
141ac468
BS
436#elif defined(__sparc_v9__)
437 // Map the buffer below 2G, so we can use direct calls and branches
438 flags |= MAP_FIXED;
439 start = (void *) 0x60000000UL;
440 if (code_gen_buffer_size > (512 * 1024 * 1024))
441 code_gen_buffer_size = (512 * 1024 * 1024);
1cb0661e 442#elif defined(__arm__)
63d41246 443 /* Map the buffer below 32M, so we can use direct calls and branches */
1cb0661e
AZ
444 flags |= MAP_FIXED;
445 start = (void *) 0x01000000UL;
446 if (code_gen_buffer_size > 16 * 1024 * 1024)
447 code_gen_buffer_size = 16 * 1024 * 1024;
26a5f13b 448#endif
141ac468
BS
449 code_gen_buffer = mmap(start, code_gen_buffer_size,
450 PROT_WRITE | PROT_READ | PROT_EXEC,
26a5f13b
FB
451 flags, -1, 0);
452 if (code_gen_buffer == MAP_FAILED) {
453 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454 exit(1);
455 }
456 }
06e67a82
AL
457#elif defined(__FreeBSD__)
458 {
459 int flags;
460 void *addr = NULL;
461 flags = MAP_PRIVATE | MAP_ANONYMOUS;
462#if defined(__x86_64__)
463 /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464 * 0x40000000 is free */
465 flags |= MAP_FIXED;
466 addr = (void *)0x40000000;
467 /* Cannot map more than that */
468 if (code_gen_buffer_size > (800 * 1024 * 1024))
469 code_gen_buffer_size = (800 * 1024 * 1024);
470#endif
471 code_gen_buffer = mmap(addr, code_gen_buffer_size,
472 PROT_WRITE | PROT_READ | PROT_EXEC,
473 flags, -1, 0);
474 if (code_gen_buffer == MAP_FAILED) {
475 fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476 exit(1);
477 }
478 }
26a5f13b
FB
479#else
480 code_gen_buffer = qemu_malloc(code_gen_buffer_size);
26a5f13b
FB
481 map_exec(code_gen_buffer, code_gen_buffer_size);
482#endif
4369415f 483#endif /* !USE_STATIC_CODE_GEN_BUFFER */
26a5f13b
FB
484 map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485 code_gen_buffer_max_size = code_gen_buffer_size -
486 code_gen_max_block_size();
487 code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488 tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489}
490
491/* Must be called before using the QEMU cpus. 'tb_size' is the size
492 (in bytes) allocated to the translation buffer. Zero means default
493 size. */
494void cpu_exec_init_all(unsigned long tb_size)
495{
26a5f13b
FB
496 cpu_gen_init();
497 code_gen_alloc(tb_size);
498 code_gen_ptr = code_gen_buffer;
4369415f 499 page_init();
e2eef170 500#if !defined(CONFIG_USER_ONLY)
26a5f13b 501 io_mem_init();
e2eef170 502#endif
26a5f13b
FB
503}
504
9656f324
PB
505#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506
507#define CPU_COMMON_SAVE_VERSION 1
508
509static void cpu_common_save(QEMUFile *f, void *opaque)
510{
511 CPUState *env = opaque;
512
513 qemu_put_be32s(f, &env->halted);
514 qemu_put_be32s(f, &env->interrupt_request);
515}
516
517static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518{
519 CPUState *env = opaque;
520
521 if (version_id != CPU_COMMON_SAVE_VERSION)
522 return -EINVAL;
523
524 qemu_get_be32s(f, &env->halted);
75f482ae 525 qemu_get_be32s(f, &env->interrupt_request);
d9aa1fce 526 env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
9656f324
PB
527 tlb_flush(env, 1);
528
529 return 0;
530}
531#endif
532
6a00d601 533void cpu_exec_init(CPUState *env)
fd6ce8f6 534{
6a00d601
FB
535 CPUState **penv;
536 int cpu_index;
537
6a00d601
FB
538 env->next_cpu = NULL;
539 penv = &first_cpu;
540 cpu_index = 0;
541 while (*penv != NULL) {
542 penv = (CPUState **)&(*penv)->next_cpu;
543 cpu_index++;
544 }
545 env->cpu_index = cpu_index;
c0ce998e
AL
546 TAILQ_INIT(&env->breakpoints);
547 TAILQ_INIT(&env->watchpoints);
6a00d601 548 *penv = env;
b3c7724c 549#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
9656f324
PB
550 register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
551 cpu_common_save, cpu_common_load, env);
b3c7724c
PB
552 register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
553 cpu_save, cpu_load, env);
554#endif
fd6ce8f6
FB
555}
556
9fa3e853
FB
557static inline void invalidate_page_bitmap(PageDesc *p)
558{
559 if (p->code_bitmap) {
59817ccb 560 qemu_free(p->code_bitmap);
9fa3e853
FB
561 p->code_bitmap = NULL;
562 }
563 p->code_write_count = 0;
564}
565
fd6ce8f6
FB
566/* set to NULL all the 'first_tb' fields in all PageDescs */
567static void page_flush_tb(void)
568{
569 int i, j;
570 PageDesc *p;
571
572 for(i = 0; i < L1_SIZE; i++) {
573 p = l1_map[i];
574 if (p) {
9fa3e853
FB
575 for(j = 0; j < L2_SIZE; j++) {
576 p->first_tb = NULL;
577 invalidate_page_bitmap(p);
578 p++;
579 }
fd6ce8f6
FB
580 }
581 }
582}
583
584/* flush all the translation blocks */
d4e8164f 585/* XXX: tb_flush is currently not thread safe */
6a00d601 586void tb_flush(CPUState *env1)
fd6ce8f6 587{
6a00d601 588 CPUState *env;
0124311e 589#if defined(DEBUG_FLUSH)
ab3d1727
BS
590 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
591 (unsigned long)(code_gen_ptr - code_gen_buffer),
592 nb_tbs, nb_tbs > 0 ?
593 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
fd6ce8f6 594#endif
26a5f13b 595 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
a208e54a
PB
596 cpu_abort(env1, "Internal error: code buffer overflow\n");
597
fd6ce8f6 598 nb_tbs = 0;
3b46e624 599
6a00d601
FB
600 for(env = first_cpu; env != NULL; env = env->next_cpu) {
601 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
602 }
9fa3e853 603
8a8a608f 604 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
fd6ce8f6 605 page_flush_tb();
9fa3e853 606
fd6ce8f6 607 code_gen_ptr = code_gen_buffer;
d4e8164f
FB
608 /* XXX: flush processor icache at this point if cache flush is
609 expensive */
e3db7226 610 tb_flush_count++;
fd6ce8f6
FB
611}
612
613#ifdef DEBUG_TB_CHECK
614
bc98a7ef 615static void tb_invalidate_check(target_ulong address)
fd6ce8f6
FB
616{
617 TranslationBlock *tb;
618 int i;
619 address &= TARGET_PAGE_MASK;
99773bd4
PB
620 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
621 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
622 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
623 address >= tb->pc + tb->size)) {
624 printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
99773bd4 625 address, (long)tb->pc, tb->size);
fd6ce8f6
FB
626 }
627 }
628 }
629}
630
631/* verify that all the pages have correct rights for code */
632static void tb_page_check(void)
633{
634 TranslationBlock *tb;
635 int i, flags1, flags2;
3b46e624 636
99773bd4
PB
637 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
638 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
fd6ce8f6
FB
639 flags1 = page_get_flags(tb->pc);
640 flags2 = page_get_flags(tb->pc + tb->size - 1);
641 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
642 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
99773bd4 643 (long)tb->pc, tb->size, flags1, flags2);
fd6ce8f6
FB
644 }
645 }
646 }
647}
648
bdaf78e0 649static void tb_jmp_check(TranslationBlock *tb)
d4e8164f
FB
650{
651 TranslationBlock *tb1;
652 unsigned int n1;
653
654 /* suppress any remaining jumps to this TB */
655 tb1 = tb->jmp_first;
656 for(;;) {
657 n1 = (long)tb1 & 3;
658 tb1 = (TranslationBlock *)((long)tb1 & ~3);
659 if (n1 == 2)
660 break;
661 tb1 = tb1->jmp_next[n1];
662 }
663 /* check end of list */
664 if (tb1 != tb) {
665 printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
666 }
667}
668
fd6ce8f6
FB
669#endif
670
671/* invalidate one TB */
672static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
673 int next_offset)
674{
675 TranslationBlock *tb1;
676 for(;;) {
677 tb1 = *ptb;
678 if (tb1 == tb) {
679 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
680 break;
681 }
682 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
683 }
684}
685
9fa3e853
FB
686static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
687{
688 TranslationBlock *tb1;
689 unsigned int n1;
690
691 for(;;) {
692 tb1 = *ptb;
693 n1 = (long)tb1 & 3;
694 tb1 = (TranslationBlock *)((long)tb1 & ~3);
695 if (tb1 == tb) {
696 *ptb = tb1->page_next[n1];
697 break;
698 }
699 ptb = &tb1->page_next[n1];
700 }
701}
702
d4e8164f
FB
703static inline void tb_jmp_remove(TranslationBlock *tb, int n)
704{
705 TranslationBlock *tb1, **ptb;
706 unsigned int n1;
707
708 ptb = &tb->jmp_next[n];
709 tb1 = *ptb;
710 if (tb1) {
711 /* find tb(n) in circular list */
712 for(;;) {
713 tb1 = *ptb;
714 n1 = (long)tb1 & 3;
715 tb1 = (TranslationBlock *)((long)tb1 & ~3);
716 if (n1 == n && tb1 == tb)
717 break;
718 if (n1 == 2) {
719 ptb = &tb1->jmp_first;
720 } else {
721 ptb = &tb1->jmp_next[n1];
722 }
723 }
724 /* now we can suppress tb(n) from the list */
725 *ptb = tb->jmp_next[n];
726
727 tb->jmp_next[n] = NULL;
728 }
729}
730
731/* reset the jump entry 'n' of a TB so that it is not chained to
732 another TB */
733static inline void tb_reset_jump(TranslationBlock *tb, int n)
734{
735 tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
736}
737
2e70f6ef 738void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
fd6ce8f6 739{
6a00d601 740 CPUState *env;
8a40a180 741 PageDesc *p;
d4e8164f 742 unsigned int h, n1;
00f82b8a 743 target_phys_addr_t phys_pc;
8a40a180 744 TranslationBlock *tb1, *tb2;
3b46e624 745
8a40a180
FB
746 /* remove the TB from the hash list */
747 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
748 h = tb_phys_hash_func(phys_pc);
5fafdf24 749 tb_remove(&tb_phys_hash[h], tb,
8a40a180
FB
750 offsetof(TranslationBlock, phys_hash_next));
751
752 /* remove the TB from the page list */
753 if (tb->page_addr[0] != page_addr) {
754 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
755 tb_page_remove(&p->first_tb, tb);
756 invalidate_page_bitmap(p);
757 }
758 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
759 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
760 tb_page_remove(&p->first_tb, tb);
761 invalidate_page_bitmap(p);
762 }
763
36bdbe54 764 tb_invalidated_flag = 1;
59817ccb 765
fd6ce8f6 766 /* remove the TB from the hash list */
8a40a180 767 h = tb_jmp_cache_hash_func(tb->pc);
6a00d601
FB
768 for(env = first_cpu; env != NULL; env = env->next_cpu) {
769 if (env->tb_jmp_cache[h] == tb)
770 env->tb_jmp_cache[h] = NULL;
771 }
d4e8164f
FB
772
773 /* suppress this TB from the two jump lists */
774 tb_jmp_remove(tb, 0);
775 tb_jmp_remove(tb, 1);
776
777 /* suppress any remaining jumps to this TB */
778 tb1 = tb->jmp_first;
779 for(;;) {
780 n1 = (long)tb1 & 3;
781 if (n1 == 2)
782 break;
783 tb1 = (TranslationBlock *)((long)tb1 & ~3);
784 tb2 = tb1->jmp_next[n1];
785 tb_reset_jump(tb1, n1);
786 tb1->jmp_next[n1] = NULL;
787 tb1 = tb2;
788 }
789 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
9fa3e853 790
e3db7226 791 tb_phys_invalidate_count++;
9fa3e853
FB
792}
793
794static inline void set_bits(uint8_t *tab, int start, int len)
795{
796 int end, mask, end1;
797
798 end = start + len;
799 tab += start >> 3;
800 mask = 0xff << (start & 7);
801 if ((start & ~7) == (end & ~7)) {
802 if (start < end) {
803 mask &= ~(0xff << (end & 7));
804 *tab |= mask;
805 }
806 } else {
807 *tab++ |= mask;
808 start = (start + 8) & ~7;
809 end1 = end & ~7;
810 while (start < end1) {
811 *tab++ = 0xff;
812 start += 8;
813 }
814 if (start < end) {
815 mask = ~(0xff << (end & 7));
816 *tab |= mask;
817 }
818 }
819}
820
821static void build_page_bitmap(PageDesc *p)
822{
823 int n, tb_start, tb_end;
824 TranslationBlock *tb;
3b46e624 825
b2a7081a 826 p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
9fa3e853
FB
827
828 tb = p->first_tb;
829 while (tb != NULL) {
830 n = (long)tb & 3;
831 tb = (TranslationBlock *)((long)tb & ~3);
832 /* NOTE: this is subtle as a TB may span two physical pages */
833 if (n == 0) {
834 /* NOTE: tb_end may be after the end of the page, but
835 it is not a problem */
836 tb_start = tb->pc & ~TARGET_PAGE_MASK;
837 tb_end = tb_start + tb->size;
838 if (tb_end > TARGET_PAGE_SIZE)
839 tb_end = TARGET_PAGE_SIZE;
840 } else {
841 tb_start = 0;
842 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
843 }
844 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
845 tb = tb->page_next[n];
846 }
847}
848
2e70f6ef
PB
849TranslationBlock *tb_gen_code(CPUState *env,
850 target_ulong pc, target_ulong cs_base,
851 int flags, int cflags)
d720b93d
FB
852{
853 TranslationBlock *tb;
854 uint8_t *tc_ptr;
855 target_ulong phys_pc, phys_page2, virt_page2;
856 int code_gen_size;
857
c27004ec
FB
858 phys_pc = get_phys_addr_code(env, pc);
859 tb = tb_alloc(pc);
d720b93d
FB
860 if (!tb) {
861 /* flush must be done */
862 tb_flush(env);
863 /* cannot fail at this point */
c27004ec 864 tb = tb_alloc(pc);
2e70f6ef
PB
865 /* Don't forget to invalidate previous TB info. */
866 tb_invalidated_flag = 1;
d720b93d
FB
867 }
868 tc_ptr = code_gen_ptr;
869 tb->tc_ptr = tc_ptr;
870 tb->cs_base = cs_base;
871 tb->flags = flags;
872 tb->cflags = cflags;
d07bde88 873 cpu_gen_code(env, tb, &code_gen_size);
d720b93d 874 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
3b46e624 875
d720b93d 876 /* check next page if needed */
c27004ec 877 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
d720b93d 878 phys_page2 = -1;
c27004ec 879 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
d720b93d
FB
880 phys_page2 = get_phys_addr_code(env, virt_page2);
881 }
882 tb_link_phys(tb, phys_pc, phys_page2);
2e70f6ef 883 return tb;
d720b93d 884}
3b46e624 885
9fa3e853
FB
886/* invalidate all TBs which intersect with the target physical page
887 starting in range [start;end[. NOTE: start and end must refer to
d720b93d
FB
888 the same physical page. 'is_cpu_write_access' should be true if called
889 from a real cpu write access: the virtual CPU will exit the current
890 TB if code is modified inside this TB. */
00f82b8a 891void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
d720b93d
FB
892 int is_cpu_write_access)
893{
6b917547 894 TranslationBlock *tb, *tb_next, *saved_tb;
d720b93d 895 CPUState *env = cpu_single_env;
9fa3e853 896 target_ulong tb_start, tb_end;
6b917547
AL
897 PageDesc *p;
898 int n;
899#ifdef TARGET_HAS_PRECISE_SMC
900 int current_tb_not_found = is_cpu_write_access;
901 TranslationBlock *current_tb = NULL;
902 int current_tb_modified = 0;
903 target_ulong current_pc = 0;
904 target_ulong current_cs_base = 0;
905 int current_flags = 0;
906#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
907
908 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 909 if (!p)
9fa3e853 910 return;
5fafdf24 911 if (!p->code_bitmap &&
d720b93d
FB
912 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
913 is_cpu_write_access) {
9fa3e853
FB
914 /* build code bitmap */
915 build_page_bitmap(p);
916 }
917
918 /* we remove all the TBs in the range [start, end[ */
919 /* XXX: see if in some cases it could be faster to invalidate all the code */
920 tb = p->first_tb;
921 while (tb != NULL) {
922 n = (long)tb & 3;
923 tb = (TranslationBlock *)((long)tb & ~3);
924 tb_next = tb->page_next[n];
925 /* NOTE: this is subtle as a TB may span two physical pages */
926 if (n == 0) {
927 /* NOTE: tb_end may be after the end of the page, but
928 it is not a problem */
929 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
930 tb_end = tb_start + tb->size;
931 } else {
932 tb_start = tb->page_addr[1];
933 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
934 }
935 if (!(tb_end <= start || tb_start >= end)) {
d720b93d
FB
936#ifdef TARGET_HAS_PRECISE_SMC
937 if (current_tb_not_found) {
938 current_tb_not_found = 0;
939 current_tb = NULL;
2e70f6ef 940 if (env->mem_io_pc) {
d720b93d 941 /* now we have a real cpu fault */
2e70f6ef 942 current_tb = tb_find_pc(env->mem_io_pc);
d720b93d
FB
943 }
944 }
945 if (current_tb == tb &&
2e70f6ef 946 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
947 /* If we are modifying the current TB, we must stop
948 its execution. We could be more precise by checking
949 that the modification is after the current PC, but it
950 would require a specialized function to partially
951 restore the CPU state */
3b46e624 952
d720b93d 953 current_tb_modified = 1;
5fafdf24 954 cpu_restore_state(current_tb, env,
2e70f6ef 955 env->mem_io_pc, NULL);
6b917547
AL
956 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
957 &current_flags);
d720b93d
FB
958 }
959#endif /* TARGET_HAS_PRECISE_SMC */
6f5a9f7e
FB
960 /* we need to do that to handle the case where a signal
961 occurs while doing tb_phys_invalidate() */
962 saved_tb = NULL;
963 if (env) {
964 saved_tb = env->current_tb;
965 env->current_tb = NULL;
966 }
9fa3e853 967 tb_phys_invalidate(tb, -1);
6f5a9f7e
FB
968 if (env) {
969 env->current_tb = saved_tb;
970 if (env->interrupt_request && env->current_tb)
971 cpu_interrupt(env, env->interrupt_request);
972 }
9fa3e853
FB
973 }
974 tb = tb_next;
975 }
976#if !defined(CONFIG_USER_ONLY)
977 /* if no code remaining, no need to continue to use slow writes */
978 if (!p->first_tb) {
979 invalidate_page_bitmap(p);
d720b93d 980 if (is_cpu_write_access) {
2e70f6ef 981 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
d720b93d
FB
982 }
983 }
984#endif
985#ifdef TARGET_HAS_PRECISE_SMC
986 if (current_tb_modified) {
987 /* we generate a block containing just the instruction
988 modifying the memory. It will ensure that it cannot modify
989 itself */
ea1c1802 990 env->current_tb = NULL;
2e70f6ef 991 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d 992 cpu_resume_from_signal(env, NULL);
9fa3e853 993 }
fd6ce8f6 994#endif
9fa3e853 995}
fd6ce8f6 996
9fa3e853 997/* len must be <= 8 and start must be a multiple of len */
00f82b8a 998static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
9fa3e853
FB
999{
1000 PageDesc *p;
1001 int offset, b;
59817ccb 1002#if 0
a4193c8a 1003 if (1) {
93fcfe39
AL
1004 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1005 cpu_single_env->mem_io_vaddr, len,
1006 cpu_single_env->eip,
1007 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
59817ccb
FB
1008 }
1009#endif
9fa3e853 1010 p = page_find(start >> TARGET_PAGE_BITS);
5fafdf24 1011 if (!p)
9fa3e853
FB
1012 return;
1013 if (p->code_bitmap) {
1014 offset = start & ~TARGET_PAGE_MASK;
1015 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1016 if (b & ((1 << len) - 1))
1017 goto do_invalidate;
1018 } else {
1019 do_invalidate:
d720b93d 1020 tb_invalidate_phys_page_range(start, start + len, 1);
9fa3e853
FB
1021 }
1022}
1023
9fa3e853 1024#if !defined(CONFIG_SOFTMMU)
00f82b8a 1025static void tb_invalidate_phys_page(target_phys_addr_t addr,
d720b93d 1026 unsigned long pc, void *puc)
9fa3e853 1027{
6b917547 1028 TranslationBlock *tb;
9fa3e853 1029 PageDesc *p;
6b917547 1030 int n;
d720b93d 1031#ifdef TARGET_HAS_PRECISE_SMC
6b917547 1032 TranslationBlock *current_tb = NULL;
d720b93d 1033 CPUState *env = cpu_single_env;
6b917547
AL
1034 int current_tb_modified = 0;
1035 target_ulong current_pc = 0;
1036 target_ulong current_cs_base = 0;
1037 int current_flags = 0;
d720b93d 1038#endif
9fa3e853
FB
1039
1040 addr &= TARGET_PAGE_MASK;
1041 p = page_find(addr >> TARGET_PAGE_BITS);
5fafdf24 1042 if (!p)
9fa3e853
FB
1043 return;
1044 tb = p->first_tb;
d720b93d
FB
1045#ifdef TARGET_HAS_PRECISE_SMC
1046 if (tb && pc != 0) {
1047 current_tb = tb_find_pc(pc);
1048 }
1049#endif
9fa3e853
FB
1050 while (tb != NULL) {
1051 n = (long)tb & 3;
1052 tb = (TranslationBlock *)((long)tb & ~3);
d720b93d
FB
1053#ifdef TARGET_HAS_PRECISE_SMC
1054 if (current_tb == tb &&
2e70f6ef 1055 (current_tb->cflags & CF_COUNT_MASK) != 1) {
d720b93d
FB
1056 /* If we are modifying the current TB, we must stop
1057 its execution. We could be more precise by checking
1058 that the modification is after the current PC, but it
1059 would require a specialized function to partially
1060 restore the CPU state */
3b46e624 1061
d720b93d
FB
1062 current_tb_modified = 1;
1063 cpu_restore_state(current_tb, env, pc, puc);
6b917547
AL
1064 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1065 &current_flags);
d720b93d
FB
1066 }
1067#endif /* TARGET_HAS_PRECISE_SMC */
9fa3e853
FB
1068 tb_phys_invalidate(tb, addr);
1069 tb = tb->page_next[n];
1070 }
fd6ce8f6 1071 p->first_tb = NULL;
d720b93d
FB
1072#ifdef TARGET_HAS_PRECISE_SMC
1073 if (current_tb_modified) {
1074 /* we generate a block containing just the instruction
1075 modifying the memory. It will ensure that it cannot modify
1076 itself */
ea1c1802 1077 env->current_tb = NULL;
2e70f6ef 1078 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
d720b93d
FB
1079 cpu_resume_from_signal(env, puc);
1080 }
1081#endif
fd6ce8f6 1082}
9fa3e853 1083#endif
fd6ce8f6
FB
1084
1085/* add the tb in the target page and protect it if necessary */
5fafdf24 1086static inline void tb_alloc_page(TranslationBlock *tb,
53a5960a 1087 unsigned int n, target_ulong page_addr)
fd6ce8f6
FB
1088{
1089 PageDesc *p;
9fa3e853
FB
1090 TranslationBlock *last_first_tb;
1091
1092 tb->page_addr[n] = page_addr;
3a7d929e 1093 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
9fa3e853
FB
1094 tb->page_next[n] = p->first_tb;
1095 last_first_tb = p->first_tb;
1096 p->first_tb = (TranslationBlock *)((long)tb | n);
1097 invalidate_page_bitmap(p);
fd6ce8f6 1098
107db443 1099#if defined(TARGET_HAS_SMC) || 1
d720b93d 1100
9fa3e853 1101#if defined(CONFIG_USER_ONLY)
fd6ce8f6 1102 if (p->flags & PAGE_WRITE) {
53a5960a
PB
1103 target_ulong addr;
1104 PageDesc *p2;
9fa3e853
FB
1105 int prot;
1106
fd6ce8f6
FB
1107 /* force the host page as non writable (writes will have a
1108 page fault + mprotect overhead) */
53a5960a 1109 page_addr &= qemu_host_page_mask;
fd6ce8f6 1110 prot = 0;
53a5960a
PB
1111 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1112 addr += TARGET_PAGE_SIZE) {
1113
1114 p2 = page_find (addr >> TARGET_PAGE_BITS);
1115 if (!p2)
1116 continue;
1117 prot |= p2->flags;
1118 p2->flags &= ~PAGE_WRITE;
1119 page_get_flags(addr);
1120 }
5fafdf24 1121 mprotect(g2h(page_addr), qemu_host_page_size,
fd6ce8f6
FB
1122 (prot & PAGE_BITS) & ~PAGE_WRITE);
1123#ifdef DEBUG_TB_INVALIDATE
ab3d1727 1124 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
53a5960a 1125 page_addr);
fd6ce8f6 1126#endif
fd6ce8f6 1127 }
9fa3e853
FB
1128#else
1129 /* if some code is already present, then the pages are already
1130 protected. So we handle the case where only the first TB is
1131 allocated in a physical page */
1132 if (!last_first_tb) {
6a00d601 1133 tlb_protect_code(page_addr);
9fa3e853
FB
1134 }
1135#endif
d720b93d
FB
1136
1137#endif /* TARGET_HAS_SMC */
fd6ce8f6
FB
1138}
1139
1140/* Allocate a new translation block. Flush the translation buffer if
1141 too many translation blocks or too much generated code. */
c27004ec 1142TranslationBlock *tb_alloc(target_ulong pc)
fd6ce8f6
FB
1143{
1144 TranslationBlock *tb;
fd6ce8f6 1145
26a5f13b
FB
1146 if (nb_tbs >= code_gen_max_blocks ||
1147 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
d4e8164f 1148 return NULL;
fd6ce8f6
FB
1149 tb = &tbs[nb_tbs++];
1150 tb->pc = pc;
b448f2f3 1151 tb->cflags = 0;
d4e8164f
FB
1152 return tb;
1153}
1154
2e70f6ef
PB
1155void tb_free(TranslationBlock *tb)
1156{
bf20dc07 1157 /* In practice this is mostly used for single use temporary TB
2e70f6ef
PB
1158 Ignore the hard cases and just back up if this TB happens to
1159 be the last one generated. */
1160 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1161 code_gen_ptr = tb->tc_ptr;
1162 nb_tbs--;
1163 }
1164}
1165
9fa3e853
FB
1166/* add a new TB and link it to the physical page tables. phys_page2 is
1167 (-1) to indicate that only one page contains the TB. */
5fafdf24 1168void tb_link_phys(TranslationBlock *tb,
9fa3e853 1169 target_ulong phys_pc, target_ulong phys_page2)
d4e8164f 1170{
9fa3e853
FB
1171 unsigned int h;
1172 TranslationBlock **ptb;
1173
c8a706fe
PB
1174 /* Grab the mmap lock to stop another thread invalidating this TB
1175 before we are done. */
1176 mmap_lock();
9fa3e853
FB
1177 /* add in the physical hash table */
1178 h = tb_phys_hash_func(phys_pc);
1179 ptb = &tb_phys_hash[h];
1180 tb->phys_hash_next = *ptb;
1181 *ptb = tb;
fd6ce8f6
FB
1182
1183 /* add in the page list */
9fa3e853
FB
1184 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1185 if (phys_page2 != -1)
1186 tb_alloc_page(tb, 1, phys_page2);
1187 else
1188 tb->page_addr[1] = -1;
9fa3e853 1189
d4e8164f
FB
1190 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1191 tb->jmp_next[0] = NULL;
1192 tb->jmp_next[1] = NULL;
1193
1194 /* init original jump addresses */
1195 if (tb->tb_next_offset[0] != 0xffff)
1196 tb_reset_jump(tb, 0);
1197 if (tb->tb_next_offset[1] != 0xffff)
1198 tb_reset_jump(tb, 1);
8a40a180
FB
1199
1200#ifdef DEBUG_TB_CHECK
1201 tb_page_check();
1202#endif
c8a706fe 1203 mmap_unlock();
fd6ce8f6
FB
1204}
1205
9fa3e853
FB
1206/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1207 tb[1].tc_ptr. Return NULL if not found */
1208TranslationBlock *tb_find_pc(unsigned long tc_ptr)
fd6ce8f6 1209{
9fa3e853
FB
1210 int m_min, m_max, m;
1211 unsigned long v;
1212 TranslationBlock *tb;
a513fe19
FB
1213
1214 if (nb_tbs <= 0)
1215 return NULL;
1216 if (tc_ptr < (unsigned long)code_gen_buffer ||
1217 tc_ptr >= (unsigned long)code_gen_ptr)
1218 return NULL;
1219 /* binary search (cf Knuth) */
1220 m_min = 0;
1221 m_max = nb_tbs - 1;
1222 while (m_min <= m_max) {
1223 m = (m_min + m_max) >> 1;
1224 tb = &tbs[m];
1225 v = (unsigned long)tb->tc_ptr;
1226 if (v == tc_ptr)
1227 return tb;
1228 else if (tc_ptr < v) {
1229 m_max = m - 1;
1230 } else {
1231 m_min = m + 1;
1232 }
5fafdf24 1233 }
a513fe19
FB
1234 return &tbs[m_max];
1235}
7501267e 1236
ea041c0e
FB
1237static void tb_reset_jump_recursive(TranslationBlock *tb);
1238
1239static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1240{
1241 TranslationBlock *tb1, *tb_next, **ptb;
1242 unsigned int n1;
1243
1244 tb1 = tb->jmp_next[n];
1245 if (tb1 != NULL) {
1246 /* find head of list */
1247 for(;;) {
1248 n1 = (long)tb1 & 3;
1249 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1250 if (n1 == 2)
1251 break;
1252 tb1 = tb1->jmp_next[n1];
1253 }
1254 /* we are now sure now that tb jumps to tb1 */
1255 tb_next = tb1;
1256
1257 /* remove tb from the jmp_first list */
1258 ptb = &tb_next->jmp_first;
1259 for(;;) {
1260 tb1 = *ptb;
1261 n1 = (long)tb1 & 3;
1262 tb1 = (TranslationBlock *)((long)tb1 & ~3);
1263 if (n1 == n && tb1 == tb)
1264 break;
1265 ptb = &tb1->jmp_next[n1];
1266 }
1267 *ptb = tb->jmp_next[n];
1268 tb->jmp_next[n] = NULL;
3b46e624 1269
ea041c0e
FB
1270 /* suppress the jump to next tb in generated code */
1271 tb_reset_jump(tb, n);
1272
0124311e 1273 /* suppress jumps in the tb on which we could have jumped */
ea041c0e
FB
1274 tb_reset_jump_recursive(tb_next);
1275 }
1276}
1277
1278static void tb_reset_jump_recursive(TranslationBlock *tb)
1279{
1280 tb_reset_jump_recursive2(tb, 0);
1281 tb_reset_jump_recursive2(tb, 1);
1282}
1283
1fddef4b 1284#if defined(TARGET_HAS_ICE)
d720b93d
FB
1285static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1286{
9b3c35e0
JM
1287 target_phys_addr_t addr;
1288 target_ulong pd;
c2f07f81
PB
1289 ram_addr_t ram_addr;
1290 PhysPageDesc *p;
d720b93d 1291
c2f07f81
PB
1292 addr = cpu_get_phys_page_debug(env, pc);
1293 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1294 if (!p) {
1295 pd = IO_MEM_UNASSIGNED;
1296 } else {
1297 pd = p->phys_offset;
1298 }
1299 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
706cd4b5 1300 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
d720b93d 1301}
c27004ec 1302#endif
d720b93d 1303
6658ffb8 1304/* Add a watchpoint. */
a1d1bb31
AL
1305int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1306 int flags, CPUWatchpoint **watchpoint)
6658ffb8 1307{
b4051334 1308 target_ulong len_mask = ~(len - 1);
c0ce998e 1309 CPUWatchpoint *wp;
6658ffb8 1310
b4051334
AL
1311 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1312 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1313 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1314 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1315 return -EINVAL;
1316 }
a1d1bb31 1317 wp = qemu_malloc(sizeof(*wp));
a1d1bb31
AL
1318
1319 wp->vaddr = addr;
b4051334 1320 wp->len_mask = len_mask;
a1d1bb31
AL
1321 wp->flags = flags;
1322
2dc9f411 1323 /* keep all GDB-injected watchpoints in front */
c0ce998e
AL
1324 if (flags & BP_GDB)
1325 TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1326 else
1327 TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
6658ffb8 1328
6658ffb8 1329 tlb_flush_page(env, addr);
a1d1bb31
AL
1330
1331 if (watchpoint)
1332 *watchpoint = wp;
1333 return 0;
6658ffb8
PB
1334}
1335
a1d1bb31
AL
1336/* Remove a specific watchpoint. */
1337int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1338 int flags)
6658ffb8 1339{
b4051334 1340 target_ulong len_mask = ~(len - 1);
a1d1bb31 1341 CPUWatchpoint *wp;
6658ffb8 1342
c0ce998e 1343 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334 1344 if (addr == wp->vaddr && len_mask == wp->len_mask
6e140f28 1345 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
a1d1bb31 1346 cpu_watchpoint_remove_by_ref(env, wp);
6658ffb8
PB
1347 return 0;
1348 }
1349 }
a1d1bb31 1350 return -ENOENT;
6658ffb8
PB
1351}
1352
a1d1bb31
AL
1353/* Remove a specific watchpoint by reference. */
1354void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1355{
c0ce998e 1356 TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
7d03f82f 1357
a1d1bb31
AL
1358 tlb_flush_page(env, watchpoint->vaddr);
1359
1360 qemu_free(watchpoint);
1361}
1362
1363/* Remove all matching watchpoints. */
1364void cpu_watchpoint_remove_all(CPUState *env, int mask)
1365{
c0ce998e 1366 CPUWatchpoint *wp, *next;
a1d1bb31 1367
c0ce998e 1368 TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
a1d1bb31
AL
1369 if (wp->flags & mask)
1370 cpu_watchpoint_remove_by_ref(env, wp);
c0ce998e 1371 }
7d03f82f
EI
1372}
1373
a1d1bb31
AL
1374/* Add a breakpoint. */
1375int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1376 CPUBreakpoint **breakpoint)
4c3a88a2 1377{
1fddef4b 1378#if defined(TARGET_HAS_ICE)
c0ce998e 1379 CPUBreakpoint *bp;
3b46e624 1380
a1d1bb31 1381 bp = qemu_malloc(sizeof(*bp));
4c3a88a2 1382
a1d1bb31
AL
1383 bp->pc = pc;
1384 bp->flags = flags;
1385
2dc9f411 1386 /* keep all GDB-injected breakpoints in front */
c0ce998e
AL
1387 if (flags & BP_GDB)
1388 TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1389 else
1390 TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
3b46e624 1391
d720b93d 1392 breakpoint_invalidate(env, pc);
a1d1bb31
AL
1393
1394 if (breakpoint)
1395 *breakpoint = bp;
4c3a88a2
FB
1396 return 0;
1397#else
a1d1bb31 1398 return -ENOSYS;
4c3a88a2
FB
1399#endif
1400}
1401
a1d1bb31
AL
1402/* Remove a specific breakpoint. */
1403int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1404{
7d03f82f 1405#if defined(TARGET_HAS_ICE)
a1d1bb31
AL
1406 CPUBreakpoint *bp;
1407
c0ce998e 1408 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31
AL
1409 if (bp->pc == pc && bp->flags == flags) {
1410 cpu_breakpoint_remove_by_ref(env, bp);
1411 return 0;
1412 }
7d03f82f 1413 }
a1d1bb31
AL
1414 return -ENOENT;
1415#else
1416 return -ENOSYS;
7d03f82f
EI
1417#endif
1418}
1419
a1d1bb31
AL
1420/* Remove a specific breakpoint by reference. */
1421void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
4c3a88a2 1422{
1fddef4b 1423#if defined(TARGET_HAS_ICE)
c0ce998e 1424 TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
d720b93d 1425
a1d1bb31
AL
1426 breakpoint_invalidate(env, breakpoint->pc);
1427
1428 qemu_free(breakpoint);
1429#endif
1430}
1431
1432/* Remove all matching breakpoints. */
1433void cpu_breakpoint_remove_all(CPUState *env, int mask)
1434{
1435#if defined(TARGET_HAS_ICE)
c0ce998e 1436 CPUBreakpoint *bp, *next;
a1d1bb31 1437
c0ce998e 1438 TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
a1d1bb31
AL
1439 if (bp->flags & mask)
1440 cpu_breakpoint_remove_by_ref(env, bp);
c0ce998e 1441 }
4c3a88a2
FB
1442#endif
1443}
1444
c33a346e
FB
1445/* enable or disable single step mode. EXCP_DEBUG is returned by the
1446 CPU loop after each instruction */
1447void cpu_single_step(CPUState *env, int enabled)
1448{
1fddef4b 1449#if defined(TARGET_HAS_ICE)
c33a346e
FB
1450 if (env->singlestep_enabled != enabled) {
1451 env->singlestep_enabled = enabled;
1452 /* must flush all the translated code to avoid inconsistancies */
9fa3e853 1453 /* XXX: only flush what is necessary */
0124311e 1454 tb_flush(env);
c33a346e
FB
1455 }
1456#endif
1457}
1458
34865134
FB
1459/* enable or disable low levels log */
1460void cpu_set_log(int log_flags)
1461{
1462 loglevel = log_flags;
1463 if (loglevel && !logfile) {
11fcfab4 1464 logfile = fopen(logfilename, log_append ? "a" : "w");
34865134
FB
1465 if (!logfile) {
1466 perror(logfilename);
1467 _exit(1);
1468 }
9fa3e853
FB
1469#if !defined(CONFIG_SOFTMMU)
1470 /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1471 {
b55266b5 1472 static char logfile_buf[4096];
9fa3e853
FB
1473 setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1474 }
1475#else
34865134 1476 setvbuf(logfile, NULL, _IOLBF, 0);
9fa3e853 1477#endif
e735b91c
PB
1478 log_append = 1;
1479 }
1480 if (!loglevel && logfile) {
1481 fclose(logfile);
1482 logfile = NULL;
34865134
FB
1483 }
1484}
1485
1486void cpu_set_log_filename(const char *filename)
1487{
1488 logfilename = strdup(filename);
e735b91c
PB
1489 if (logfile) {
1490 fclose(logfile);
1491 logfile = NULL;
1492 }
1493 cpu_set_log(loglevel);
34865134 1494}
c33a346e 1495
0124311e 1496/* mask must never be zero, except for A20 change call */
68a79315 1497void cpu_interrupt(CPUState *env, int mask)
ea041c0e 1498{
d5975363 1499#if !defined(USE_NPTL)
ea041c0e 1500 TranslationBlock *tb;
15a51156 1501 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
d5975363 1502#endif
2e70f6ef 1503 int old_mask;
59817ccb 1504
8a11f5ff
AJ
1505 if (mask & CPU_INTERRUPT_EXIT) {
1506 env->exit_request = 1;
1507 mask &= ~CPU_INTERRUPT_EXIT;
1508 }
1509
2e70f6ef 1510 old_mask = env->interrupt_request;
68a79315 1511 env->interrupt_request |= mask;
d5975363
PB
1512#if defined(USE_NPTL)
1513 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1514 problem and hope the cpu will stop of its own accord. For userspace
1515 emulation this often isn't actually as bad as it sounds. Often
1516 signals are used primarily to interrupt blocking syscalls. */
1517#else
2e70f6ef 1518 if (use_icount) {
266910c4 1519 env->icount_decr.u16.high = 0xffff;
2e70f6ef 1520#ifndef CONFIG_USER_ONLY
2e70f6ef 1521 if (!can_do_io(env)
8a11f5ff 1522 && (mask & ~old_mask) != 0) {
2e70f6ef
PB
1523 cpu_abort(env, "Raised interrupt while not in I/O function");
1524 }
1525#endif
1526 } else {
1527 tb = env->current_tb;
1528 /* if the cpu is currently executing code, we must unlink it and
1529 all the potentially executing TB */
1530 if (tb && !testandset(&interrupt_lock)) {
1531 env->current_tb = NULL;
1532 tb_reset_jump_recursive(tb);
1533 resetlock(&interrupt_lock);
1534 }
ea041c0e 1535 }
d5975363 1536#endif
ea041c0e
FB
1537}
1538
b54ad049
FB
1539void cpu_reset_interrupt(CPUState *env, int mask)
1540{
1541 env->interrupt_request &= ~mask;
1542}
1543
c7cd6a37 1544const CPULogItem cpu_log_items[] = {
5fafdf24 1545 { CPU_LOG_TB_OUT_ASM, "out_asm",
f193c797
FB
1546 "show generated host assembly code for each compiled TB" },
1547 { CPU_LOG_TB_IN_ASM, "in_asm",
1548 "show target assembly code for each compiled TB" },
5fafdf24 1549 { CPU_LOG_TB_OP, "op",
57fec1fe 1550 "show micro ops for each compiled TB" },
f193c797 1551 { CPU_LOG_TB_OP_OPT, "op_opt",
e01a1157
BS
1552 "show micro ops "
1553#ifdef TARGET_I386
1554 "before eflags optimization and "
f193c797 1555#endif
e01a1157 1556 "after liveness analysis" },
f193c797
FB
1557 { CPU_LOG_INT, "int",
1558 "show interrupts/exceptions in short format" },
1559 { CPU_LOG_EXEC, "exec",
1560 "show trace before each executed TB (lots of logs)" },
9fddaa0c 1561 { CPU_LOG_TB_CPU, "cpu",
e91c8a77 1562 "show CPU state before block translation" },
f193c797
FB
1563#ifdef TARGET_I386
1564 { CPU_LOG_PCALL, "pcall",
1565 "show protected mode far calls/returns/exceptions" },
eca1bdf4
AL
1566 { CPU_LOG_RESET, "cpu_reset",
1567 "show CPU state before CPU resets" },
f193c797 1568#endif
8e3a9fd2 1569#ifdef DEBUG_IOPORT
fd872598
FB
1570 { CPU_LOG_IOPORT, "ioport",
1571 "show all i/o ports accesses" },
8e3a9fd2 1572#endif
f193c797
FB
1573 { 0, NULL, NULL },
1574};
1575
1576static int cmp1(const char *s1, int n, const char *s2)
1577{
1578 if (strlen(s2) != n)
1579 return 0;
1580 return memcmp(s1, s2, n) == 0;
1581}
3b46e624 1582
f193c797
FB
1583/* takes a comma separated list of log masks. Return 0 if error. */
1584int cpu_str_to_log_mask(const char *str)
1585{
c7cd6a37 1586 const CPULogItem *item;
f193c797
FB
1587 int mask;
1588 const char *p, *p1;
1589
1590 p = str;
1591 mask = 0;
1592 for(;;) {
1593 p1 = strchr(p, ',');
1594 if (!p1)
1595 p1 = p + strlen(p);
8e3a9fd2
FB
1596 if(cmp1(p,p1-p,"all")) {
1597 for(item = cpu_log_items; item->mask != 0; item++) {
1598 mask |= item->mask;
1599 }
1600 } else {
f193c797
FB
1601 for(item = cpu_log_items; item->mask != 0; item++) {
1602 if (cmp1(p, p1 - p, item->name))
1603 goto found;
1604 }
1605 return 0;
8e3a9fd2 1606 }
f193c797
FB
1607 found:
1608 mask |= item->mask;
1609 if (*p1 != ',')
1610 break;
1611 p = p1 + 1;
1612 }
1613 return mask;
1614}
ea041c0e 1615
7501267e
FB
1616void cpu_abort(CPUState *env, const char *fmt, ...)
1617{
1618 va_list ap;
493ae1f0 1619 va_list ap2;
7501267e
FB
1620
1621 va_start(ap, fmt);
493ae1f0 1622 va_copy(ap2, ap);
7501267e
FB
1623 fprintf(stderr, "qemu: fatal: ");
1624 vfprintf(stderr, fmt, ap);
1625 fprintf(stderr, "\n");
1626#ifdef TARGET_I386
7fe48483
FB
1627 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1628#else
1629 cpu_dump_state(env, stderr, fprintf, 0);
7501267e 1630#endif
93fcfe39
AL
1631 if (qemu_log_enabled()) {
1632 qemu_log("qemu: fatal: ");
1633 qemu_log_vprintf(fmt, ap2);
1634 qemu_log("\n");
f9373291 1635#ifdef TARGET_I386
93fcfe39 1636 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
f9373291 1637#else
93fcfe39 1638 log_cpu_state(env, 0);
f9373291 1639#endif
31b1a7b4 1640 qemu_log_flush();
93fcfe39 1641 qemu_log_close();
924edcae 1642 }
493ae1f0 1643 va_end(ap2);
f9373291 1644 va_end(ap);
7501267e
FB
1645 abort();
1646}
1647
c5be9f08
TS
1648CPUState *cpu_copy(CPUState *env)
1649{
01ba9816 1650 CPUState *new_env = cpu_init(env->cpu_model_str);
c5be9f08
TS
1651 CPUState *next_cpu = new_env->next_cpu;
1652 int cpu_index = new_env->cpu_index;
5a38f081
AL
1653#if defined(TARGET_HAS_ICE)
1654 CPUBreakpoint *bp;
1655 CPUWatchpoint *wp;
1656#endif
1657
c5be9f08 1658 memcpy(new_env, env, sizeof(CPUState));
5a38f081
AL
1659
1660 /* Preserve chaining and index. */
c5be9f08
TS
1661 new_env->next_cpu = next_cpu;
1662 new_env->cpu_index = cpu_index;
5a38f081
AL
1663
1664 /* Clone all break/watchpoints.
1665 Note: Once we support ptrace with hw-debug register access, make sure
1666 BP_CPU break/watchpoints are handled correctly on clone. */
1667 TAILQ_INIT(&env->breakpoints);
1668 TAILQ_INIT(&env->watchpoints);
1669#if defined(TARGET_HAS_ICE)
1670 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1671 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1672 }
1673 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1674 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1675 wp->flags, NULL);
1676 }
1677#endif
1678
c5be9f08
TS
1679 return new_env;
1680}
1681
0124311e
FB
1682#if !defined(CONFIG_USER_ONLY)
1683
5c751e99
EI
1684static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1685{
1686 unsigned int i;
1687
1688 /* Discard jump cache entries for any tb which might potentially
1689 overlap the flushed page. */
1690 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1691 memset (&env->tb_jmp_cache[i], 0,
1692 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1693
1694 i = tb_jmp_cache_hash_page(addr);
1695 memset (&env->tb_jmp_cache[i], 0,
1696 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1697}
1698
ee8b7021
FB
1699/* NOTE: if flush_global is true, also flush global entries (not
1700 implemented yet) */
1701void tlb_flush(CPUState *env, int flush_global)
33417e70 1702{
33417e70 1703 int i;
0124311e 1704
9fa3e853
FB
1705#if defined(DEBUG_TLB)
1706 printf("tlb_flush:\n");
1707#endif
0124311e
FB
1708 /* must reset current TB so that interrupts cannot modify the
1709 links while we are modifying them */
1710 env->current_tb = NULL;
1711
33417e70 1712 for(i = 0; i < CPU_TLB_SIZE; i++) {
84b7b8e7
FB
1713 env->tlb_table[0][i].addr_read = -1;
1714 env->tlb_table[0][i].addr_write = -1;
1715 env->tlb_table[0][i].addr_code = -1;
1716 env->tlb_table[1][i].addr_read = -1;
1717 env->tlb_table[1][i].addr_write = -1;
1718 env->tlb_table[1][i].addr_code = -1;
6fa4cea9
JM
1719#if (NB_MMU_MODES >= 3)
1720 env->tlb_table[2][i].addr_read = -1;
1721 env->tlb_table[2][i].addr_write = -1;
1722 env->tlb_table[2][i].addr_code = -1;
1723#if (NB_MMU_MODES == 4)
1724 env->tlb_table[3][i].addr_read = -1;
1725 env->tlb_table[3][i].addr_write = -1;
1726 env->tlb_table[3][i].addr_code = -1;
1727#endif
1728#endif
33417e70 1729 }
9fa3e853 1730
8a40a180 1731 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
9fa3e853 1732
0a962c02
FB
1733#ifdef USE_KQEMU
1734 if (env->kqemu_enabled) {
1735 kqemu_flush(env, flush_global);
1736 }
9fa3e853 1737#endif
e3db7226 1738 tlb_flush_count++;
33417e70
FB
1739}
1740
274da6b2 1741static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
61382a50 1742{
5fafdf24 1743 if (addr == (tlb_entry->addr_read &
84b7b8e7 1744 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1745 addr == (tlb_entry->addr_write &
84b7b8e7 1746 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
5fafdf24 1747 addr == (tlb_entry->addr_code &
84b7b8e7
FB
1748 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1749 tlb_entry->addr_read = -1;
1750 tlb_entry->addr_write = -1;
1751 tlb_entry->addr_code = -1;
1752 }
61382a50
FB
1753}
1754
2e12669a 1755void tlb_flush_page(CPUState *env, target_ulong addr)
33417e70 1756{
8a40a180 1757 int i;
0124311e 1758
9fa3e853 1759#if defined(DEBUG_TLB)
108c49b8 1760 printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
9fa3e853 1761#endif
0124311e
FB
1762 /* must reset current TB so that interrupts cannot modify the
1763 links while we are modifying them */
1764 env->current_tb = NULL;
61382a50
FB
1765
1766 addr &= TARGET_PAGE_MASK;
1767 i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
84b7b8e7
FB
1768 tlb_flush_entry(&env->tlb_table[0][i], addr);
1769 tlb_flush_entry(&env->tlb_table[1][i], addr);
6fa4cea9
JM
1770#if (NB_MMU_MODES >= 3)
1771 tlb_flush_entry(&env->tlb_table[2][i], addr);
1772#if (NB_MMU_MODES == 4)
1773 tlb_flush_entry(&env->tlb_table[3][i], addr);
1774#endif
1775#endif
0124311e 1776
5c751e99 1777 tlb_flush_jmp_cache(env, addr);
9fa3e853 1778
0a962c02
FB
1779#ifdef USE_KQEMU
1780 if (env->kqemu_enabled) {
1781 kqemu_flush_page(env, addr);
1782 }
1783#endif
9fa3e853
FB
1784}
1785
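/* Usage sketch (an assumption, not taken from this file): target MMU code
   usually invalidates a single softmmu mapping after changing one PTE and
   falls back to a full flush when the whole address space mapping changes.
   The helper names are made up; kept under #if 0. */
#if 0
static void example_pte_changed(CPUState *env, target_ulong va)
{
    tlb_flush_page(env, va);    /* drop only the stale entry for this page */
}

static void example_mmu_base_switched(CPUState *env)
{
    tlb_flush(env, 1);          /* no ASID tagging here: drop everything */
}
#endif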
9fa3e853
FB
1786/* update the TLBs so that writes to code in the virtual page 'addr'
1787 can be detected */
6a00d601 1788static void tlb_protect_code(ram_addr_t ram_addr)
9fa3e853 1789{
5fafdf24 1790 cpu_physical_memory_reset_dirty(ram_addr,
6a00d601
FB
1791 ram_addr + TARGET_PAGE_SIZE,
1792 CODE_DIRTY_FLAG);
9fa3e853
FB
1793}
1794
9fa3e853 1795/* update the TLB so that writes in physical page 'phys_addr' are no longer
3a7d929e 1796 tested for self modifying code */
5fafdf24 1797static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
3a7d929e 1798 target_ulong vaddr)
9fa3e853 1799{
3a7d929e 1800 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1ccde1cb
FB
1801}
1802
5fafdf24 1803static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1ccde1cb
FB
1804 unsigned long start, unsigned long length)
1805{
1806 unsigned long addr;
84b7b8e7
FB
1807 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1808 addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1ccde1cb 1809 if ((addr - start) < length) {
0f459d16 1810 tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1ccde1cb
FB
1811 }
1812 }
1813}
1814
3a7d929e 1815void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 1816 int dirty_flags)
1ccde1cb
FB
1817{
1818 CPUState *env;
4f2ac237 1819 unsigned long length, start1;
0a962c02
FB
1820 int i, mask, len;
1821 uint8_t *p;
1ccde1cb
FB
1822
1823 start &= TARGET_PAGE_MASK;
1824 end = TARGET_PAGE_ALIGN(end);
1825
1826 length = end - start;
1827 if (length == 0)
1828 return;
0a962c02 1829 len = length >> TARGET_PAGE_BITS;
3a7d929e 1830#ifdef USE_KQEMU
6a00d601
FB
1831 /* XXX: should not depend on cpu context */
1832 env = first_cpu;
3a7d929e 1833 if (env->kqemu_enabled) {
f23db169
FB
1834 ram_addr_t addr;
1835 addr = start;
1836 for(i = 0; i < len; i++) {
1837 kqemu_set_notdirty(env, addr);
1838 addr += TARGET_PAGE_SIZE;
1839 }
3a7d929e
FB
1840 }
1841#endif
f23db169
FB
1842 mask = ~dirty_flags;
1843 p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1844 for(i = 0; i < len; i++)
1845 p[i] &= mask;
1846
1ccde1cb
FB
1847 /* we modify the TLB cache so that the dirty bit will be set again
1848 when accessing the range */
59817ccb 1849 start1 = start + (unsigned long)phys_ram_base;
6a00d601
FB
1850 for(env = first_cpu; env != NULL; env = env->next_cpu) {
1851 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1852 tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
6a00d601 1853 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1854 tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
6fa4cea9
JM
1855#if (NB_MMU_MODES >= 3)
1856 for(i = 0; i < CPU_TLB_SIZE; i++)
1857 tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1858#if (NB_MMU_MODES == 4)
1859 for(i = 0; i < CPU_TLB_SIZE; i++)
1860 tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1861#endif
1862#endif
6a00d601 1863 }
1ccde1cb
FB
1864}
1865
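/* Sketch of a dirty-log consumer.  Assumptions: cpu_physical_memory_get_dirty()
   and VGA_DIRTY_FLAG come from cpu-all.h in this tree, and the framebuffer is
   backed by ordinary RAM.  A display adapter redraws only the pages touched
   since the last scan and then clears the flag for the next round.
   Kept under #if 0. */
#if 0
static void example_display_refresh(ram_addr_t fb_start, ram_addr_t fb_size)
{
    ram_addr_t a;

    for (a = fb_start; a < fb_start + fb_size; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size,
                                    VGA_DIRTY_FLAG);
}
#endif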
74576198
AL
1866int cpu_physical_memory_set_dirty_tracking(int enable)
1867{
1868 in_migration = enable;
1869 return 0;
1870}
1871
1872int cpu_physical_memory_get_dirty_tracking(void)
1873{
1874 return in_migration;
1875}
1876
2bec46dc
AL
1877void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1878{
1879 if (kvm_enabled())
1880 kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1881}
1882
3a7d929e
FB
1883static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1884{
1885 ram_addr_t ram_addr;
1886
84b7b8e7 1887 if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
5fafdf24 1888 ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
3a7d929e
FB
1889 tlb_entry->addend - (unsigned long)phys_ram_base;
1890 if (!cpu_physical_memory_is_dirty(ram_addr)) {
0f459d16 1891 tlb_entry->addr_write |= TLB_NOTDIRTY;
3a7d929e
FB
1892 }
1893 }
1894}
1895
1896/* update the TLB according to the current state of the dirty bits */
1897void cpu_tlb_update_dirty(CPUState *env)
1898{
1899 int i;
1900 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1901 tlb_update_dirty(&env->tlb_table[0][i]);
3a7d929e 1902 for(i = 0; i < CPU_TLB_SIZE; i++)
84b7b8e7 1903 tlb_update_dirty(&env->tlb_table[1][i]);
6fa4cea9
JM
1904#if (NB_MMU_MODES >= 3)
1905 for(i = 0; i < CPU_TLB_SIZE; i++)
1906 tlb_update_dirty(&env->tlb_table[2][i]);
1907#if (NB_MMU_MODES == 4)
1908 for(i = 0; i < CPU_TLB_SIZE; i++)
1909 tlb_update_dirty(&env->tlb_table[3][i]);
1910#endif
1911#endif
3a7d929e
FB
1912}
1913
0f459d16 1914static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1ccde1cb 1915{
0f459d16
PB
1916 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1917 tlb_entry->addr_write = vaddr;
1ccde1cb
FB
1918}
1919
0f459d16
PB
1920/* update the TLB corresponding to virtual page vaddr
1921 so that it is no longer dirty */
1922static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1ccde1cb 1923{
1ccde1cb
FB
1924 int i;
1925
0f459d16 1926 vaddr &= TARGET_PAGE_MASK;
1ccde1cb 1927 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
0f459d16
PB
1928 tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1929 tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
6fa4cea9 1930#if (NB_MMU_MODES >= 3)
0f459d16 1931 tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
6fa4cea9 1932#if (NB_MMU_MODES == 4)
0f459d16 1933 tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
6fa4cea9
JM
1934#endif
1935#endif
9fa3e853
FB
1936}
1937
59817ccb
FB
1938/* add a new TLB entry. At most one entry for a given virtual address
1939 is permitted. Return 0 if OK or 2 if the page could not be mapped
1940 (can only happen in non SOFTMMU mode for I/O pages or pages
1941 conflicting with the host address space). */
5fafdf24
TS
1942int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1943 target_phys_addr_t paddr, int prot,
6ebbf390 1944 int mmu_idx, int is_softmmu)
9fa3e853 1945{
92e873b9 1946 PhysPageDesc *p;
4f2ac237 1947 unsigned long pd;
9fa3e853 1948 unsigned int index;
4f2ac237 1949 target_ulong address;
0f459d16 1950 target_ulong code_address;
108c49b8 1951 target_phys_addr_t addend;
9fa3e853 1952 int ret;
84b7b8e7 1953 CPUTLBEntry *te;
a1d1bb31 1954 CPUWatchpoint *wp;
0f459d16 1955 target_phys_addr_t iotlb;
9fa3e853 1956
92e873b9 1957 p = phys_page_find(paddr >> TARGET_PAGE_BITS);
9fa3e853
FB
1958 if (!p) {
1959 pd = IO_MEM_UNASSIGNED;
9fa3e853
FB
1960 } else {
1961 pd = p->phys_offset;
9fa3e853
FB
1962 }
1963#if defined(DEBUG_TLB)
6ebbf390
JM
1964 printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1965 vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
9fa3e853
FB
1966#endif
1967
1968 ret = 0;
0f459d16
PB
1969 address = vaddr;
1970 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1971 /* IO memory case (romd handled later) */
1972 address |= TLB_MMIO;
1973 }
1974 addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1975 if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1976 /* Normal RAM. */
1977 iotlb = pd & TARGET_PAGE_MASK;
1978 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1979 iotlb |= IO_MEM_NOTDIRTY;
1980 else
1981 iotlb |= IO_MEM_ROM;
1982 } else {
1983 /* IO handlers are currently passed a physical address.
1984 It would be nice to pass an offset from the base address
1985 of that region. This would avoid having to special case RAM,
1986 and avoid full address decoding in every device.
1987 We can't use the high bits of pd for this because
1988 IO_MEM_ROMD uses these as a ram address. */
8da3ff18
PB
1989 iotlb = (pd & ~TARGET_PAGE_MASK);
1990 if (p) {
8da3ff18
PB
1991 iotlb += p->region_offset;
1992 } else {
1993 iotlb += paddr;
1994 }
0f459d16
PB
1995 }
1996
1997 code_address = address;
1998 /* Make accesses to pages with watchpoints go via the
1999 watchpoint trap routines. */
c0ce998e 2000 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
a1d1bb31 2001 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
0f459d16
PB
2002 iotlb = io_mem_watch + paddr;
2003 /* TODO: The memory case can be optimized by not trapping
2004 reads of pages with a write breakpoint. */
2005 address |= TLB_MMIO;
6658ffb8 2006 }
0f459d16 2007 }
d79acba4 2008
0f459d16
PB
2009 index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2010 env->iotlb[mmu_idx][index] = iotlb - vaddr;
2011 te = &env->tlb_table[mmu_idx][index];
2012 te->addend = addend - vaddr;
2013 if (prot & PAGE_READ) {
2014 te->addr_read = address;
2015 } else {
2016 te->addr_read = -1;
2017 }
5c751e99 2018
0f459d16
PB
2019 if (prot & PAGE_EXEC) {
2020 te->addr_code = code_address;
2021 } else {
2022 te->addr_code = -1;
2023 }
2024 if (prot & PAGE_WRITE) {
2025 if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2026 (pd & IO_MEM_ROMD)) {
2027 /* Write access calls the I/O callback. */
2028 te->addr_write = address | TLB_MMIO;
2029 } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2030 !cpu_physical_memory_is_dirty(pd)) {
2031 te->addr_write = address | TLB_NOTDIRTY;
9fa3e853 2032 } else {
0f459d16 2033 te->addr_write = address;
9fa3e853 2034 }
0f459d16
PB
2035 } else {
2036 te->addr_write = -1;
9fa3e853 2037 }
9fa3e853
FB
2038 return ret;
2039}
2040
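/* Caller sketch (an assumption, not from this file): a target's MMU fault
   handler walks the guest page tables and installs the translation with
   tlb_set_page_exec(); mmu_idx selects e.g. the kernel vs. user TLB.  The
   page table walk itself is elided.  Kept under #if 0. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong va,
                                    int rw, int mmu_idx)
{
    target_phys_addr_t pa = 0;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* ... real code derives pa and prot from the guest page tables,
       or raises a guest fault and returns without installing anything ... */

    return tlb_set_page_exec(env, va & TARGET_PAGE_MASK,
                             pa & TARGET_PAGE_MASK, prot, mmu_idx, 1);
}
#endif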
0124311e
FB
2041#else
2042
ee8b7021 2043void tlb_flush(CPUState *env, int flush_global)
0124311e
FB
2044{
2045}
2046
2e12669a 2047void tlb_flush_page(CPUState *env, target_ulong addr)
0124311e
FB
2048{
2049}
2050
5fafdf24
TS
2051int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2052 target_phys_addr_t paddr, int prot,
6ebbf390 2053 int mmu_idx, int is_softmmu)
9fa3e853
FB
2054{
2055 return 0;
2056}
0124311e 2057
9fa3e853
FB
2058/* dump memory mappings */
2059void page_dump(FILE *f)
33417e70 2060{
9fa3e853
FB
2061 unsigned long start, end;
2062 int i, j, prot, prot1;
2063 PageDesc *p;
33417e70 2064
9fa3e853
FB
2065 fprintf(f, "%-8s %-8s %-8s %s\n",
2066 "start", "end", "size", "prot");
2067 start = -1;
2068 end = -1;
2069 prot = 0;
2070 for(i = 0; i <= L1_SIZE; i++) {
2071 if (i < L1_SIZE)
2072 p = l1_map[i];
2073 else
2074 p = NULL;
2075 for(j = 0;j < L2_SIZE; j++) {
2076 if (!p)
2077 prot1 = 0;
2078 else
2079 prot1 = p[j].flags;
2080 if (prot1 != prot) {
2081 end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2082 if (start != -1) {
2083 fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
5fafdf24 2084 start, end, end - start,
9fa3e853
FB
2085 prot & PAGE_READ ? 'r' : '-',
2086 prot & PAGE_WRITE ? 'w' : '-',
2087 prot & PAGE_EXEC ? 'x' : '-');
2088 }
2089 if (prot1 != 0)
2090 start = end;
2091 else
2092 start = -1;
2093 prot = prot1;
2094 }
2095 if (!p)
2096 break;
2097 }
33417e70 2098 }
33417e70
FB
2099}
2100
53a5960a 2101int page_get_flags(target_ulong address)
33417e70 2102{
9fa3e853
FB
2103 PageDesc *p;
2104
2105 p = page_find(address >> TARGET_PAGE_BITS);
33417e70 2106 if (!p)
9fa3e853
FB
2107 return 0;
2108 return p->flags;
2109}
2110
2111/* modify the flags of a page and invalidate the code if
2112 necessary. The flag PAGE_WRITE_ORG is positioned automatically
2113 depending on PAGE_WRITE */
53a5960a 2114void page_set_flags(target_ulong start, target_ulong end, int flags)
9fa3e853
FB
2115{
2116 PageDesc *p;
53a5960a 2117 target_ulong addr;
9fa3e853 2118
c8a706fe 2119 /* mmap_lock should already be held. */
9fa3e853
FB
2120 start = start & TARGET_PAGE_MASK;
2121 end = TARGET_PAGE_ALIGN(end);
2122 if (flags & PAGE_WRITE)
2123 flags |= PAGE_WRITE_ORG;
9fa3e853
FB
2124 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2125 p = page_find_alloc(addr >> TARGET_PAGE_BITS);
17e2377a
PB
2126 /* We may be called for host regions that are outside guest
2127 address space. */
2128 if (!p)
2129 return;
9fa3e853
FB
2130 /* if the write protection is set, then we invalidate the code
2131 inside */
5fafdf24 2132 if (!(p->flags & PAGE_WRITE) &&
9fa3e853
FB
2133 (flags & PAGE_WRITE) &&
2134 p->first_tb) {
d720b93d 2135 tb_invalidate_phys_page(addr, 0, NULL);
9fa3e853
FB
2136 }
2137 p->flags = flags;
2138 }
33417e70
FB
2139}
2140
3d97b40b
TS
2141int page_check_range(target_ulong start, target_ulong len, int flags)
2142{
2143 PageDesc *p;
2144 target_ulong end;
2145 target_ulong addr;
2146
55f280c9
AZ
2147 if (start + len < start)
2148 /* we've wrapped around */
2149 return -1;
2150
3d97b40b
TS
2151 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2152 start = start & TARGET_PAGE_MASK;
2153
3d97b40b
TS
2154 for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2155 p = page_find(addr >> TARGET_PAGE_BITS);
2156 if( !p )
2157 return -1;
2158 if( !(p->flags & PAGE_VALID) )
2159 return -1;
2160
dae3270c 2161 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
3d97b40b 2162 return -1;
dae3270c
FB
2163 if (flags & PAGE_WRITE) {
2164 if (!(p->flags & PAGE_WRITE_ORG))
2165 return -1;
2166 /* unprotect the page if it was put read-only because it
2167 contains translated code */
2168 if (!(p->flags & PAGE_WRITE)) {
2169 if (!page_unprotect(addr, 0, NULL))
2170 return -1;
2171 }
2172 return 0;
2173 }
3d97b40b
TS
2174 }
2175 return 0;
2176}
2177
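/* Usage sketch (an assumption): the user-mode emulator checks guest buffers
   handed to syscalls before touching them; a wrapper along these lines keeps
   the flag logic in one place.  Kept under #if 0. */
#if 0
static int example_access_ok(target_ulong guest_addr, target_ulong size,
                             int is_write)
{
    int flags = is_write ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    return page_check_range(guest_addr, size, flags) == 0;
}
#endif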
9fa3e853
FB
2178/* called from signal handler: invalidate the code and unprotect the
2179 page. Return TRUE if the fault was successfully handled. */
53a5960a 2180int page_unprotect(target_ulong address, unsigned long pc, void *puc)
9fa3e853
FB
2181{
2182 unsigned int page_index, prot, pindex;
2183 PageDesc *p, *p1;
53a5960a 2184 target_ulong host_start, host_end, addr;
9fa3e853 2185
c8a706fe
PB
2186 /* Technically this isn't safe inside a signal handler. However we
2187 know this only ever happens in a synchronous SEGV handler, so in
2188 practice it seems to be ok. */
2189 mmap_lock();
2190
83fb7adf 2191 host_start = address & qemu_host_page_mask;
9fa3e853
FB
2192 page_index = host_start >> TARGET_PAGE_BITS;
2193 p1 = page_find(page_index);
c8a706fe
PB
2194 if (!p1) {
2195 mmap_unlock();
9fa3e853 2196 return 0;
c8a706fe 2197 }
83fb7adf 2198 host_end = host_start + qemu_host_page_size;
9fa3e853
FB
2199 p = p1;
2200 prot = 0;
2201 for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2202 prot |= p->flags;
2203 p++;
2204 }
2205 /* if the page was really writable, then we change its
2206 protection back to writable */
2207 if (prot & PAGE_WRITE_ORG) {
2208 pindex = (address - host_start) >> TARGET_PAGE_BITS;
2209 if (!(p1[pindex].flags & PAGE_WRITE)) {
5fafdf24 2210 mprotect((void *)g2h(host_start), qemu_host_page_size,
9fa3e853
FB
2211 (prot & PAGE_BITS) | PAGE_WRITE);
2212 p1[pindex].flags |= PAGE_WRITE;
2213 /* and since the content will be modified, we must invalidate
2214 the corresponding translated code. */
d720b93d 2215 tb_invalidate_phys_page(address, pc, puc);
9fa3e853
FB
2216#ifdef DEBUG_TB_CHECK
2217 tb_invalidate_check(address);
2218#endif
c8a706fe 2219 mmap_unlock();
9fa3e853
FB
2220 return 1;
2221 }
2222 }
c8a706fe 2223 mmap_unlock();
9fa3e853
FB
2224 return 0;
2225}
2226
6a00d601
FB
2227static inline void tlb_set_dirty(CPUState *env,
2228 unsigned long addr, target_ulong vaddr)
1ccde1cb
FB
2229{
2230}
9fa3e853
FB
2231#endif /* defined(CONFIG_USER_ONLY) */
2232
e2eef170 2233#if !defined(CONFIG_USER_ONLY)
8da3ff18 2234
db7b5426 2235static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2236 ram_addr_t memory, ram_addr_t region_offset);
00f82b8a 2237static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2238 ram_addr_t orig_memory, ram_addr_t region_offset);
db7b5426
BS
2239#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2240 need_subpage) \
2241 do { \
2242 if (addr > start_addr) \
2243 start_addr2 = 0; \
2244 else { \
2245 start_addr2 = start_addr & ~TARGET_PAGE_MASK; \
2246 if (start_addr2 > 0) \
2247 need_subpage = 1; \
2248 } \
2249 \
49e9fba2 2250 if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE) \
db7b5426
BS
2251 end_addr2 = TARGET_PAGE_SIZE - 1; \
2252 else { \
2253 end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2254 if (end_addr2 < TARGET_PAGE_SIZE - 1) \
2255 need_subpage = 1; \
2256 } \
2257 } while (0)
2258
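/* Worked example (illustration only, assuming TARGET_PAGE_SIZE == 0x1000):
   registering start_addr = 0x10000800 with orig_size = 0x400.  On the first
   loop iteration addr == start_addr, so:
     - start_addr2 = start_addr & ~TARGET_PAGE_MASK = 0x800 > 0
       => need_subpage = 1
     - (start_addr + orig_size) - addr = 0x400 < TARGET_PAGE_SIZE, so
       end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK = 0xbff
   Only offsets 0x800..0xbff of that page are routed to the new handlers; the
   rest of the page keeps its previous mapping through the subpage code below. */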
33417e70
FB
2259/* register physical memory. 'size' must be a multiple of the target
2260 page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
8da3ff18
PB
2261 io memory page. The address used when calling the IO function is
2262 the offset from the start of the region, plus region_offset. Both
2263 start_addr and region_offset are rounded down to a page boundary
2264 before calculating this offset. This should not be a problem unless
2265 the low bits of start_addr and region_offset differ. */
2266void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2267 ram_addr_t size,
2268 ram_addr_t phys_offset,
2269 ram_addr_t region_offset)
33417e70 2270{
108c49b8 2271 target_phys_addr_t addr, end_addr;
92e873b9 2272 PhysPageDesc *p;
9d42037b 2273 CPUState *env;
00f82b8a 2274 ram_addr_t orig_size = size;
db7b5426 2275 void *subpage;
33417e70 2276
da260249
FB
2277#ifdef USE_KQEMU
2278 /* XXX: should not depend on cpu context */
2279 env = first_cpu;
2280 if (env->kqemu_enabled) {
2281 kqemu_set_phys_mem(start_addr, size, phys_offset);
2282 }
2283#endif
7ba1e619
AL
2284 if (kvm_enabled())
2285 kvm_set_phys_mem(start_addr, size, phys_offset);
2286
67c4d23c
PB
2287 if (phys_offset == IO_MEM_UNASSIGNED) {
2288 region_offset = start_addr;
2289 }
8da3ff18 2290 region_offset &= TARGET_PAGE_MASK;
5fd386f6 2291 size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
49e9fba2
BS
2292 end_addr = start_addr + (target_phys_addr_t)size;
2293 for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
db7b5426
BS
2294 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2295 if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
00f82b8a 2296 ram_addr_t orig_memory = p->phys_offset;
db7b5426
BS
2297 target_phys_addr_t start_addr2, end_addr2;
2298 int need_subpage = 0;
2299
2300 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2301 need_subpage);
4254fab8 2302 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426
BS
2303 if (!(orig_memory & IO_MEM_SUBPAGE)) {
2304 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18
PB
2305 &p->phys_offset, orig_memory,
2306 p->region_offset);
db7b5426
BS
2307 } else {
2308 subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2309 >> IO_MEM_SHIFT];
2310 }
8da3ff18
PB
2311 subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2312 region_offset);
2313 p->region_offset = 0;
db7b5426
BS
2314 } else {
2315 p->phys_offset = phys_offset;
2316 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2317 (phys_offset & IO_MEM_ROMD))
2318 phys_offset += TARGET_PAGE_SIZE;
2319 }
2320 } else {
2321 p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2322 p->phys_offset = phys_offset;
8da3ff18 2323 p->region_offset = region_offset;
db7b5426 2324 if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
8da3ff18 2325 (phys_offset & IO_MEM_ROMD)) {
db7b5426 2326 phys_offset += TARGET_PAGE_SIZE;
0e8f0967 2327 } else {
db7b5426
BS
2328 target_phys_addr_t start_addr2, end_addr2;
2329 int need_subpage = 0;
2330
2331 CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2332 end_addr2, need_subpage);
2333
4254fab8 2334 if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
db7b5426 2335 subpage = subpage_init((addr & TARGET_PAGE_MASK),
8da3ff18 2336 &p->phys_offset, IO_MEM_UNASSIGNED,
67c4d23c 2337 addr & TARGET_PAGE_MASK);
db7b5426 2338 subpage_register(subpage, start_addr2, end_addr2,
8da3ff18
PB
2339 phys_offset, region_offset);
2340 p->region_offset = 0;
db7b5426
BS
2341 }
2342 }
2343 }
8da3ff18 2344 region_offset += TARGET_PAGE_SIZE;
33417e70 2345 }
3b46e624 2346
9d42037b
FB
2347 /* since each CPU stores ram addresses in its TLB cache, we must
2348 reset the modified entries */
2349 /* XXX: slow! */
2350 for(env = first_cpu; env != NULL; env = env->next_cpu) {
2351 tlb_flush(env, 1);
2352 }
33417e70
FB
2353}
2354
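/* Board-level sketch (illustration only): allocating guest RAM and a ROM with
   qemu_ram_alloc() below and wiring both into the physical address map.  The
   addresses and sizes are made up; a region_offset of 0 means handlers (none,
   for plain RAM/ROM) would see offsets from the start of each region.
   Kept under #if 0. */
#if 0
static void example_map_ram_and_rom(ram_addr_t ram_size, ram_addr_t rom_size)
{
    ram_addr_t ram_off = qemu_ram_alloc(ram_size);
    ram_addr_t rom_off = qemu_ram_alloc(rom_size);

    /* RAM at guest physical address 0 */
    cpu_register_physical_memory_offset(0x00000000, ram_size,
                                        ram_off | IO_MEM_RAM, 0);
    /* ROM just below 4GB: reads hit phys_ram_base, writes are discarded */
    cpu_register_physical_memory_offset(0xfffe0000, rom_size,
                                        rom_off | IO_MEM_ROM, 0);
}
#endif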
ba863458 2355/* XXX: temporary until new memory mapping API */
00f82b8a 2356ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
ba863458
FB
2357{
2358 PhysPageDesc *p;
2359
2360 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2361 if (!p)
2362 return IO_MEM_UNASSIGNED;
2363 return p->phys_offset;
2364}
2365
f65ed4c1
AL
2366void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2367{
2368 if (kvm_enabled())
2369 kvm_coalesce_mmio_region(addr, size);
2370}
2371
2372void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2373{
2374 if (kvm_enabled())
2375 kvm_uncoalesce_mmio_region(addr, size);
2376}
2377
e9a1ab19 2378/* XXX: better than nothing */
00f82b8a 2379ram_addr_t qemu_ram_alloc(ram_addr_t size)
e9a1ab19
FB
2380{
2381 ram_addr_t addr;
7fb4fdcf 2382 if ((phys_ram_alloc_offset + size) > phys_ram_size) {
012a7045 2383 fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
ed441467 2384 (uint64_t)size, (uint64_t)phys_ram_size);
e9a1ab19
FB
2385 abort();
2386 }
2387 addr = phys_ram_alloc_offset;
2388 phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2389 return addr;
2390}
2391
2392void qemu_ram_free(ram_addr_t addr)
2393{
2394}
2395
a4193c8a 2396static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
33417e70 2397{
67d3b957 2398#ifdef DEBUG_UNASSIGNED
ab3d1727 2399 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
b4f0a316 2400#endif
0a6f8a6d 2401#if defined(TARGET_SPARC)
e18231a3
BS
2402 do_unassigned_access(addr, 0, 0, 0, 1);
2403#endif
2404 return 0;
2405}
2406
2407static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2408{
2409#ifdef DEBUG_UNASSIGNED
2410 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2411#endif
0a6f8a6d 2412#if defined(TARGET_SPARC)
e18231a3
BS
2413 do_unassigned_access(addr, 0, 0, 0, 2);
2414#endif
2415 return 0;
2416}
2417
2418static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2419{
2420#ifdef DEBUG_UNASSIGNED
2421 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2422#endif
0a6f8a6d 2423#if defined(TARGET_SPARC)
e18231a3 2424 do_unassigned_access(addr, 0, 0, 0, 4);
67d3b957 2425#endif
33417e70
FB
2426 return 0;
2427}
2428
a4193c8a 2429static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
33417e70 2430{
67d3b957 2431#ifdef DEBUG_UNASSIGNED
ab3d1727 2432 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
67d3b957 2433#endif
0a6f8a6d 2434#if defined(TARGET_SPARC)
e18231a3
BS
2435 do_unassigned_access(addr, 1, 0, 0, 1);
2436#endif
2437}
2438
2439static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2440{
2441#ifdef DEBUG_UNASSIGNED
2442 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2443#endif
0a6f8a6d 2444#if defined(TARGET_SPARC)
e18231a3
BS
2445 do_unassigned_access(addr, 1, 0, 0, 2);
2446#endif
2447}
2448
2449static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2450{
2451#ifdef DEBUG_UNASSIGNED
2452 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2453#endif
0a6f8a6d 2454#if defined(TARGET_SPARC)
e18231a3 2455 do_unassigned_access(addr, 1, 0, 0, 4);
b4f0a316 2456#endif
33417e70
FB
2457}
2458
2459static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2460 unassigned_mem_readb,
e18231a3
BS
2461 unassigned_mem_readw,
2462 unassigned_mem_readl,
33417e70
FB
2463};
2464
2465static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2466 unassigned_mem_writeb,
e18231a3
BS
2467 unassigned_mem_writew,
2468 unassigned_mem_writel,
33417e70
FB
2469};
2470
0f459d16
PB
2471static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2472 uint32_t val)
9fa3e853 2473{
3a7d929e 2474 int dirty_flags;
3a7d929e
FB
2475 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2476 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2477#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2478 tb_invalidate_phys_page_fast(ram_addr, 1);
2479 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2480#endif
3a7d929e 2481 }
0f459d16 2482 stb_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2483#ifdef USE_KQEMU
2484 if (cpu_single_env->kqemu_enabled &&
2485 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2486 kqemu_modify_page(cpu_single_env, ram_addr);
2487#endif
f23db169
FB
2488 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2489 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2490 /* we remove the notdirty callback only if the code has been
2491 flushed */
2492 if (dirty_flags == 0xff)
2e70f6ef 2493 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2494}
2495
0f459d16
PB
2496static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2497 uint32_t val)
9fa3e853 2498{
3a7d929e 2499 int dirty_flags;
3a7d929e
FB
2500 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2501 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2502#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2503 tb_invalidate_phys_page_fast(ram_addr, 2);
2504 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2505#endif
3a7d929e 2506 }
0f459d16 2507 stw_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2508#ifdef USE_KQEMU
2509 if (cpu_single_env->kqemu_enabled &&
2510 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2511 kqemu_modify_page(cpu_single_env, ram_addr);
2512#endif
f23db169
FB
2513 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2514 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2515 /* we remove the notdirty callback only if the code has been
2516 flushed */
2517 if (dirty_flags == 0xff)
2e70f6ef 2518 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2519}
2520
0f459d16
PB
2521static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2522 uint32_t val)
9fa3e853 2523{
3a7d929e 2524 int dirty_flags;
3a7d929e
FB
2525 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2526 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 2527#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
2528 tb_invalidate_phys_page_fast(ram_addr, 4);
2529 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 2530#endif
3a7d929e 2531 }
0f459d16 2532 stl_p(phys_ram_base + ram_addr, val);
f32fc648
FB
2533#ifdef USE_KQEMU
2534 if (cpu_single_env->kqemu_enabled &&
2535 (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2536 kqemu_modify_page(cpu_single_env, ram_addr);
2537#endif
f23db169
FB
2538 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2539 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2540 /* we remove the notdirty callback only if the code has been
2541 flushed */
2542 if (dirty_flags == 0xff)
2e70f6ef 2543 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
9fa3e853
FB
2544}
2545
3a7d929e 2546static CPUReadMemoryFunc *error_mem_read[3] = {
9fa3e853
FB
2547 NULL, /* never used */
2548 NULL, /* never used */
2549 NULL, /* never used */
2550};
2551
1ccde1cb
FB
2552static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2553 notdirty_mem_writeb,
2554 notdirty_mem_writew,
2555 notdirty_mem_writel,
2556};
2557
0f459d16 2558/* Generate a debug exception if a watchpoint has been hit. */
b4051334 2559static void check_watchpoint(int offset, int len_mask, int flags)
0f459d16
PB
2560{
2561 CPUState *env = cpu_single_env;
06d55cc1
AL
2562 target_ulong pc, cs_base;
2563 TranslationBlock *tb;
0f459d16 2564 target_ulong vaddr;
a1d1bb31 2565 CPUWatchpoint *wp;
06d55cc1 2566 int cpu_flags;
0f459d16 2567
06d55cc1
AL
2568 if (env->watchpoint_hit) {
2569 /* We re-entered the check after replacing the TB. Now raise
2570 * the debug interrupt so that it will trigger after the
2571 * current instruction. */
2572 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2573 return;
2574 }
2e70f6ef 2575 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
c0ce998e 2576 TAILQ_FOREACH(wp, &env->watchpoints, entry) {
b4051334
AL
2577 if ((vaddr == (wp->vaddr & len_mask) ||
2578 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
6e140f28
AL
2579 wp->flags |= BP_WATCHPOINT_HIT;
2580 if (!env->watchpoint_hit) {
2581 env->watchpoint_hit = wp;
2582 tb = tb_find_pc(env->mem_io_pc);
2583 if (!tb) {
2584 cpu_abort(env, "check_watchpoint: could not find TB for "
2585 "pc=%p", (void *)env->mem_io_pc);
2586 }
2587 cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2588 tb_phys_invalidate(tb, -1);
2589 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2590 env->exception_index = EXCP_DEBUG;
2591 } else {
2592 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2593 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2594 }
2595 cpu_resume_from_signal(env, NULL);
06d55cc1 2596 }
6e140f28
AL
2597 } else {
2598 wp->flags &= ~BP_WATCHPOINT_HIT;
0f459d16
PB
2599 }
2600 }
2601}
2602
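/* Sketch (an assumption): watchpoints are armed with cpu_watchpoint_insert()
   (see cpu_copy() above for the call signature); tlb_set_page_exec() then
   marks the page TLB_MMIO so every access to it goes through io_mem_watch and
   ends up in check_watchpoint().  Kept under #if 0. */
#if 0
static void example_arm_write_watch(CPUState *env, target_ulong vaddr)
{
    /* 4-byte write watchpoint, as a debugger stub might request */
    cpu_watchpoint_insert(env, vaddr, 4, BP_MEM_WRITE, NULL);
}
#endif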
6658ffb8
PB
2603/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2604 so these check for a hit then pass through to the normal out-of-line
2605 phys routines. */
2606static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2607{
b4051334 2608 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
6658ffb8
PB
2609 return ldub_phys(addr);
2610}
2611
2612static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2613{
b4051334 2614 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
6658ffb8
PB
2615 return lduw_phys(addr);
2616}
2617
2618static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2619{
b4051334 2620 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
6658ffb8
PB
2621 return ldl_phys(addr);
2622}
2623
6658ffb8
PB
2624static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2625 uint32_t val)
2626{
b4051334 2627 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
6658ffb8
PB
2628 stb_phys(addr, val);
2629}
2630
2631static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2632 uint32_t val)
2633{
b4051334 2634 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
6658ffb8
PB
2635 stw_phys(addr, val);
2636}
2637
2638static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2639 uint32_t val)
2640{
b4051334 2641 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
6658ffb8
PB
2642 stl_phys(addr, val);
2643}
2644
2645static CPUReadMemoryFunc *watch_mem_read[3] = {
2646 watch_mem_readb,
2647 watch_mem_readw,
2648 watch_mem_readl,
2649};
2650
2651static CPUWriteMemoryFunc *watch_mem_write[3] = {
2652 watch_mem_writeb,
2653 watch_mem_writew,
2654 watch_mem_writel,
2655};
6658ffb8 2656
db7b5426
BS
2657static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2658 unsigned int len)
2659{
db7b5426
BS
2660 uint32_t ret;
2661 unsigned int idx;
2662
8da3ff18 2663 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2664#if defined(DEBUG_SUBPAGE)
2665 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2666 mmio, len, addr, idx);
2667#endif
8da3ff18
PB
2668 ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2669 addr + mmio->region_offset[idx][0][len]);
db7b5426
BS
2670
2671 return ret;
2672}
2673
2674static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2675 uint32_t value, unsigned int len)
2676{
db7b5426
BS
2677 unsigned int idx;
2678
8da3ff18 2679 idx = SUBPAGE_IDX(addr);
db7b5426
BS
2680#if defined(DEBUG_SUBPAGE)
2681 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2682 mmio, len, addr, idx, value);
2683#endif
8da3ff18
PB
2684 (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2685 addr + mmio->region_offset[idx][1][len],
2686 value);
db7b5426
BS
2687}
2688
2689static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2690{
2691#if defined(DEBUG_SUBPAGE)
2692 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2693#endif
2694
2695 return subpage_readlen(opaque, addr, 0);
2696}
2697
2698static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2699 uint32_t value)
2700{
2701#if defined(DEBUG_SUBPAGE)
2702 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2703#endif
2704 subpage_writelen(opaque, addr, value, 0);
2705}
2706
2707static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2708{
2709#if defined(DEBUG_SUBPAGE)
2710 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2711#endif
2712
2713 return subpage_readlen(opaque, addr, 1);
2714}
2715
2716static void subpage_writew (void *opaque, target_phys_addr_t addr,
2717 uint32_t value)
2718{
2719#if defined(DEBUG_SUBPAGE)
2720 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2721#endif
2722 subpage_writelen(opaque, addr, value, 1);
2723}
2724
2725static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2726{
2727#if defined(DEBUG_SUBPAGE)
2728 printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2729#endif
2730
2731 return subpage_readlen(opaque, addr, 2);
2732}
2733
2734static void subpage_writel (void *opaque,
2735 target_phys_addr_t addr, uint32_t value)
2736{
2737#if defined(DEBUG_SUBPAGE)
2738 printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2739#endif
2740 subpage_writelen(opaque, addr, value, 2);
2741}
2742
2743static CPUReadMemoryFunc *subpage_read[] = {
2744 &subpage_readb,
2745 &subpage_readw,
2746 &subpage_readl,
2747};
2748
2749static CPUWriteMemoryFunc *subpage_write[] = {
2750 &subpage_writeb,
2751 &subpage_writew,
2752 &subpage_writel,
2753};
2754
2755static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
8da3ff18 2756 ram_addr_t memory, ram_addr_t region_offset)
db7b5426
BS
2757{
2758 int idx, eidx;
4254fab8 2759 unsigned int i;
db7b5426
BS
2760
2761 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2762 return -1;
2763 idx = SUBPAGE_IDX(start);
2764 eidx = SUBPAGE_IDX(end);
2765#if defined(DEBUG_SUBPAGE)
2766 printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2767 mmio, start, end, idx, eidx, memory);
2768#endif
2769 memory >>= IO_MEM_SHIFT;
2770 for (; idx <= eidx; idx++) {
4254fab8 2771 for (i = 0; i < 4; i++) {
3ee89922
BS
2772 if (io_mem_read[memory][i]) {
2773 mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2774 mmio->opaque[idx][0][i] = io_mem_opaque[memory];
8da3ff18 2775 mmio->region_offset[idx][0][i] = region_offset;
3ee89922
BS
2776 }
2777 if (io_mem_write[memory][i]) {
2778 mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2779 mmio->opaque[idx][1][i] = io_mem_opaque[memory];
8da3ff18 2780 mmio->region_offset[idx][1][i] = region_offset;
3ee89922 2781 }
4254fab8 2782 }
db7b5426
BS
2783 }
2784
2785 return 0;
2786}
2787
00f82b8a 2788static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
8da3ff18 2789 ram_addr_t orig_memory, ram_addr_t region_offset)
db7b5426
BS
2790{
2791 subpage_t *mmio;
2792 int subpage_memory;
2793
2794 mmio = qemu_mallocz(sizeof(subpage_t));
1eec614b
AL
2795
2796 mmio->base = base;
2797 subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
db7b5426 2798#if defined(DEBUG_SUBPAGE)
1eec614b
AL
2799 printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2800 mmio, base, TARGET_PAGE_SIZE, subpage_memory);
db7b5426 2801#endif
1eec614b
AL
2802 *phys = subpage_memory | IO_MEM_SUBPAGE;
2803 subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
8da3ff18 2804 region_offset);
db7b5426
BS
2805
2806 return mmio;
2807}
2808
88715657
AL
2809static int get_free_io_mem_idx(void)
2810{
2811 int i;
2812
2813 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2814 if (!io_mem_used[i]) {
2815 io_mem_used[i] = 1;
2816 return i;
2817 }
2818
2819 return -1;
2820}
2821
33417e70
FB
2822static void io_mem_init(void)
2823{
88715657
AL
2824 int i;
2825
3a7d929e 2826 cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
a4193c8a 2827 cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3a7d929e 2828 cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
88715657
AL
2829 for (i=0; i<5; i++)
2830 io_mem_used[i] = 1;
1ccde1cb 2831
0f459d16 2832 io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
6658ffb8 2833 watch_mem_write, NULL);
1ccde1cb 2834 /* alloc dirty bits array */
0a962c02 2835 phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
3a7d929e 2836 memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
33417e70
FB
2837}
2838
2839/* mem_read and mem_write are arrays of functions containing the
2840 function to access byte (index 0), word (index 1) and dword (index
3ee89922
BS
2841 2). Functions can be omitted with a NULL function pointer. The
2842 registered functions may be modified dynamically later.
2843 If io_index is non zero, the corresponding io zone is
4254fab8
BS
2844 modified. If it is zero, a new io zone is allocated. The return
2845 value can be used with cpu_register_physical_memory(). (-1) is
2846 returned on error. */
33417e70
FB
2847int cpu_register_io_memory(int io_index,
2848 CPUReadMemoryFunc **mem_read,
a4193c8a
FB
2849 CPUWriteMemoryFunc **mem_write,
2850 void *opaque)
33417e70 2851{
4254fab8 2852 int i, subwidth = 0;
33417e70
FB
2853
2854 if (io_index <= 0) {
88715657
AL
2855 io_index = get_free_io_mem_idx();
2856 if (io_index == -1)
2857 return io_index;
33417e70
FB
2858 } else {
2859 if (io_index >= IO_MEM_NB_ENTRIES)
2860 return -1;
2861 }
b5ff1b31 2862
33417e70 2863 for(i = 0;i < 3; i++) {
4254fab8
BS
2864 if (!mem_read[i] || !mem_write[i])
2865 subwidth = IO_MEM_SUBWIDTH;
33417e70
FB
2866 io_mem_read[io_index][i] = mem_read[i];
2867 io_mem_write[io_index][i] = mem_write[i];
2868 }
a4193c8a 2869 io_mem_opaque[io_index] = opaque;
4254fab8 2870 return (io_index << IO_MEM_SHIFT) | subwidth;
33417e70 2871}
61382a50 2872
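/* Device-side sketch: registering an MMIO region and mapping it at 'base'.
   The device struct, callbacks and addresses are made up for illustration;
   NULL entries in the arrays mark widths the device does not implement, which
   makes the region IO_MEM_SUBWIDTH and routes it through the subpage code
   above.  With a region_offset of 0 the callbacks receive the offset from the
   start of the region.  Kept under #if 0. */
#if 0
typedef struct ExampleDev {
    uint32_t reg;
} ExampleDev;

static uint32_t example_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDev *d = opaque;

    return d->reg;
}

static void example_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    ExampleDev *d = opaque;

    d->reg = val;
}

static CPUReadMemoryFunc *example_read[3] = { NULL, NULL, example_readl };
static CPUWriteMemoryFunc *example_write[3] = { NULL, NULL, example_writel };

static int example_dev_init(target_phys_addr_t base, ExampleDev *d)
{
    int io = cpu_register_io_memory(0, example_read, example_write, d);

    if (io == -1)
        return -1;
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
    return 0;
}
#endif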
88715657
AL
2873void cpu_unregister_io_memory(int io_table_address)
2874{
2875 int i;
2876 int io_index = io_table_address >> IO_MEM_SHIFT;
2877
2878 for (i=0;i < 3; i++) {
2879 io_mem_read[io_index][i] = unassigned_mem_read[i];
2880 io_mem_write[io_index][i] = unassigned_mem_write[i];
2881 }
2882 io_mem_opaque[io_index] = NULL;
2883 io_mem_used[io_index] = 0;
2884}
2885
8926b517
FB
2886CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2887{
2888 return io_mem_write[io_index >> IO_MEM_SHIFT];
2889}
2890
2891CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2892{
2893 return io_mem_read[io_index >> IO_MEM_SHIFT];
2894}
2895
e2eef170
PB
2896#endif /* !defined(CONFIG_USER_ONLY) */
2897
13eb76e0
FB
2898/* physical memory access (slow version, mainly for debug) */
2899#if defined(CONFIG_USER_ONLY)
5fafdf24 2900void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2901 int len, int is_write)
2902{
2903 int l, flags;
2904 target_ulong page;
53a5960a 2905 void * p;
13eb76e0
FB
2906
2907 while (len > 0) {
2908 page = addr & TARGET_PAGE_MASK;
2909 l = (page + TARGET_PAGE_SIZE) - addr;
2910 if (l > len)
2911 l = len;
2912 flags = page_get_flags(page);
2913 if (!(flags & PAGE_VALID))
2914 return;
2915 if (is_write) {
2916 if (!(flags & PAGE_WRITE))
2917 return;
579a97f7 2918 /* XXX: this code should not depend on lock_user */
72fb7daa 2919 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
579a97f7
FB
2920 /* FIXME - should this return an error rather than just fail? */
2921 return;
72fb7daa
AJ
2922 memcpy(p, buf, l);
2923 unlock_user(p, addr, l);
13eb76e0
FB
2924 } else {
2925 if (!(flags & PAGE_READ))
2926 return;
579a97f7 2927 /* XXX: this code should not depend on lock_user */
72fb7daa 2928 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
579a97f7
FB
2929 /* FIXME - should this return an error rather than just fail? */
2930 return;
72fb7daa 2931 memcpy(buf, p, l);
5b257578 2932 unlock_user(p, addr, 0);
13eb76e0
FB
2933 }
2934 len -= l;
2935 buf += l;
2936 addr += l;
2937 }
2938}
8df1cd07 2939
13eb76e0 2940#else
5fafdf24 2941void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0
FB
2942 int len, int is_write)
2943{
2944 int l, io_index;
2945 uint8_t *ptr;
2946 uint32_t val;
2e12669a
FB
2947 target_phys_addr_t page;
2948 unsigned long pd;
92e873b9 2949 PhysPageDesc *p;
3b46e624 2950
13eb76e0
FB
2951 while (len > 0) {
2952 page = addr & TARGET_PAGE_MASK;
2953 l = (page + TARGET_PAGE_SIZE) - addr;
2954 if (l > len)
2955 l = len;
92e873b9 2956 p = phys_page_find(page >> TARGET_PAGE_BITS);
13eb76e0
FB
2957 if (!p) {
2958 pd = IO_MEM_UNASSIGNED;
2959 } else {
2960 pd = p->phys_offset;
2961 }
3b46e624 2962
13eb76e0 2963 if (is_write) {
3a7d929e 2964 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
6c2934db 2965 target_phys_addr_t addr1 = addr;
13eb76e0 2966 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 2967 if (p)
6c2934db 2968 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
6a00d601
FB
2969 /* XXX: could force cpu_single_env to NULL to avoid
2970 potential bugs */
6c2934db 2971 if (l >= 4 && ((addr1 & 3) == 0)) {
1c213d19 2972 /* 32 bit write access */
c27004ec 2973 val = ldl_p(buf);
6c2934db 2974 io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
13eb76e0 2975 l = 4;
6c2934db 2976 } else if (l >= 2 && ((addr1 & 1) == 0)) {
1c213d19 2977 /* 16 bit write access */
c27004ec 2978 val = lduw_p(buf);
6c2934db 2979 io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2980 l = 2;
2981 } else {
1c213d19 2982 /* 8 bit write access */
c27004ec 2983 val = ldub_p(buf);
6c2934db 2984 io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
13eb76e0
FB
2985 l = 1;
2986 }
2987 } else {
b448f2f3
FB
2988 unsigned long addr1;
2989 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
13eb76e0 2990 /* RAM case */
b448f2f3 2991 ptr = phys_ram_base + addr1;
13eb76e0 2992 memcpy(ptr, buf, l);
3a7d929e
FB
2993 if (!cpu_physical_memory_is_dirty(addr1)) {
2994 /* invalidate code */
2995 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2996 /* set dirty bit */
5fafdf24 2997 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
f23db169 2998 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 2999 }
13eb76e0
FB
3000 }
3001 } else {
5fafdf24 3002 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3003 !(pd & IO_MEM_ROMD)) {
6c2934db 3004 target_phys_addr_t addr1 = addr;
13eb76e0
FB
3005 /* I/O case */
3006 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18 3007 if (p)
6c2934db
AJ
3008 addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3009 if (l >= 4 && ((addr1 & 3) == 0)) {
13eb76e0 3010 /* 32 bit read access */
6c2934db 3011 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
c27004ec 3012 stl_p(buf, val);
13eb76e0 3013 l = 4;
6c2934db 3014 } else if (l >= 2 && ((addr1 & 1) == 0)) {
13eb76e0 3015 /* 16 bit read access */
6c2934db 3016 val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
c27004ec 3017 stw_p(buf, val);
13eb76e0
FB
3018 l = 2;
3019 } else {
1c213d19 3020 /* 8 bit read access */
6c2934db 3021 val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
c27004ec 3022 stb_p(buf, val);
13eb76e0
FB
3023 l = 1;
3024 }
3025 } else {
3026 /* RAM case */
5fafdf24 3027 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
13eb76e0
FB
3028 (addr & ~TARGET_PAGE_MASK);
3029 memcpy(buf, ptr, l);
3030 }
3031 }
3032 len -= l;
3033 buf += l;
3034 addr += l;
3035 }
3036}
8df1cd07 3037
d0ecd2aa 3038/* used for ROM loading: can write in RAM and ROM */
5fafdf24 3039void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa
FB
3040 const uint8_t *buf, int len)
3041{
3042 int l;
3043 uint8_t *ptr;
3044 target_phys_addr_t page;
3045 unsigned long pd;
3046 PhysPageDesc *p;
3b46e624 3047
d0ecd2aa
FB
3048 while (len > 0) {
3049 page = addr & TARGET_PAGE_MASK;
3050 l = (page + TARGET_PAGE_SIZE) - addr;
3051 if (l > len)
3052 l = len;
3053 p = phys_page_find(page >> TARGET_PAGE_BITS);
3054 if (!p) {
3055 pd = IO_MEM_UNASSIGNED;
3056 } else {
3057 pd = p->phys_offset;
3058 }
3b46e624 3059
d0ecd2aa 3060 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2a4188a3
FB
3061 (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3062 !(pd & IO_MEM_ROMD)) {
d0ecd2aa
FB
3063 /* do nothing */
3064 } else {
3065 unsigned long addr1;
3066 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3067 /* ROM/RAM case */
3068 ptr = phys_ram_base + addr1;
3069 memcpy(ptr, buf, l);
3070 }
3071 len -= l;
3072 buf += l;
3073 addr += l;
3074 }
3075}
3076
6d16c2f8
AL
3077typedef struct {
3078 void *buffer;
3079 target_phys_addr_t addr;
3080 target_phys_addr_t len;
3081} BounceBuffer;
3082
3083static BounceBuffer bounce;
3084
ba223c29
AL
3085typedef struct MapClient {
3086 void *opaque;
3087 void (*callback)(void *opaque);
3088 LIST_ENTRY(MapClient) link;
3089} MapClient;
3090
3091static LIST_HEAD(map_client_list, MapClient) map_client_list
3092 = LIST_HEAD_INITIALIZER(map_client_list);
3093
3094void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3095{
3096 MapClient *client = qemu_malloc(sizeof(*client));
3097
3098 client->opaque = opaque;
3099 client->callback = callback;
3100 LIST_INSERT_HEAD(&map_client_list, client, link);
3101 return client;
3102}
3103
3104void cpu_unregister_map_client(void *_client)
3105{
3106 MapClient *client = (MapClient *)_client;
3107
3108 LIST_REMOVE(client, link);
3109}
3110
3111static void cpu_notify_map_clients(void)
3112{
3113 MapClient *client;
3114
3115 while (!LIST_EMPTY(&map_client_list)) {
3116 client = LIST_FIRST(&map_client_list);
3117 client->callback(client->opaque);
3118 LIST_REMOVE(client, link);
3119 }
3120}
3121
6d16c2f8
AL
3122/* Map a physical memory region into a host virtual address.
3123 * May map a subset of the requested range, given by and returned in *plen.
3124 * May return NULL if resources needed to perform the mapping are exhausted.
3125 * Use only for reads OR writes - not for read-modify-write operations.
ba223c29
AL
3126 * Use cpu_register_map_client() to know when retrying the map operation is
3127 * likely to succeed.
6d16c2f8
AL
3128 */
3129void *cpu_physical_memory_map(target_phys_addr_t addr,
3130 target_phys_addr_t *plen,
3131 int is_write)
3132{
3133 target_phys_addr_t len = *plen;
3134 target_phys_addr_t done = 0;
3135 int l;
3136 uint8_t *ret = NULL;
3137 uint8_t *ptr;
3138 target_phys_addr_t page;
3139 unsigned long pd;
3140 PhysPageDesc *p;
3141 unsigned long addr1;
3142
3143 while (len > 0) {
3144 page = addr & TARGET_PAGE_MASK;
3145 l = (page + TARGET_PAGE_SIZE) - addr;
3146 if (l > len)
3147 l = len;
3148 p = phys_page_find(page >> TARGET_PAGE_BITS);
3149 if (!p) {
3150 pd = IO_MEM_UNASSIGNED;
3151 } else {
3152 pd = p->phys_offset;
3153 }
3154
3155 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3156 if (done || bounce.buffer) {
3157 break;
3158 }
3159 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3160 bounce.addr = addr;
3161 bounce.len = l;
3162 if (!is_write) {
3163 cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3164 }
3165 ptr = bounce.buffer;
3166 } else {
3167 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3168 ptr = phys_ram_base + addr1;
3169 }
3170 if (!done) {
3171 ret = ptr;
3172 } else if (ret + done != ptr) {
3173 break;
3174 }
3175
3176 len -= l;
3177 addr += l;
3178 done += l;
3179 }
3180 *plen = done;
3181 return ret;
3182}
3183
3184/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3185 * Will also mark the memory as dirty if is_write == 1. access_len gives
3186 * the amount of memory that was actually read or written by the caller.
3187 */
3188void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3189 int is_write, target_phys_addr_t access_len)
3190{
3191 if (buffer != bounce.buffer) {
3192 if (is_write) {
3193 unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3194 while (access_len) {
3195 unsigned l;
3196 l = TARGET_PAGE_SIZE;
3197 if (l > access_len)
3198 l = access_len;
3199 if (!cpu_physical_memory_is_dirty(addr1)) {
3200 /* invalidate code */
3201 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3202 /* set dirty bit */
3203 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3204 (0xff & ~CODE_DIRTY_FLAG);
3205 }
3206 addr1 += l;
3207 access_len -= l;
3208 }
3209 }
3210 return;
3211 }
3212 if (is_write) {
3213 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3214 }
3215 qemu_free(bounce.buffer);
3216 bounce.buffer = NULL;
ba223c29 3217 cpu_notify_map_clients();
6d16c2f8 3218}
d0ecd2aa 3219
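/* DMA usage sketch (illustration only): a device maps the guest buffer for
   zero-copy access, may get the single bounce buffer instead (or NULL when it
   is busy, in which case cpu_register_map_client() above can be used to retry
   later), and always unmaps with the length actually transferred.
   Kept under #if 0. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dst, const uint8_t *src,
                                 target_phys_addr_t len)
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *host = cpu_physical_memory_map(dst, &plen, 1 /* is_write */);

        if (!host) {
            /* resources exhausted: retry later from a map-client callback */
            return;
        }
        memcpy(host, src, plen);
        cpu_physical_memory_unmap(host, plen, 1, plen);
        dst += plen;
        src += plen;
        len -= plen;
    }
}
#endif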
8df1cd07
FB
3220/* warning: addr must be aligned */
3221uint32_t ldl_phys(target_phys_addr_t addr)
3222{
3223 int io_index;
3224 uint8_t *ptr;
3225 uint32_t val;
3226 unsigned long pd;
3227 PhysPageDesc *p;
3228
3229 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3230 if (!p) {
3231 pd = IO_MEM_UNASSIGNED;
3232 } else {
3233 pd = p->phys_offset;
3234 }
3b46e624 3235
5fafdf24 3236 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2a4188a3 3237 !(pd & IO_MEM_ROMD)) {
8df1cd07
FB
3238 /* I/O case */
3239 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3240 if (p)
3241 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3242 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3243 } else {
3244 /* RAM case */
5fafdf24 3245 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
8df1cd07
FB
3246 (addr & ~TARGET_PAGE_MASK);
3247 val = ldl_p(ptr);
3248 }
3249 return val;
3250}
3251
84b7b8e7
FB
3252/* warning: addr must be aligned */
3253uint64_t ldq_phys(target_phys_addr_t addr)
3254{
3255 int io_index;
3256 uint8_t *ptr;
3257 uint64_t val;
3258 unsigned long pd;
3259 PhysPageDesc *p;
3260
3261 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3262 if (!p) {
3263 pd = IO_MEM_UNASSIGNED;
3264 } else {
3265 pd = p->phys_offset;
3266 }
3b46e624 3267
2a4188a3
FB
3268 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3269 !(pd & IO_MEM_ROMD)) {
84b7b8e7
FB
3270 /* I/O case */
3271 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3272 if (p)
3273 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
84b7b8e7
FB
3274#ifdef TARGET_WORDS_BIGENDIAN
3275 val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3276 val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3277#else
3278 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3279 val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3280#endif
3281 } else {
3282 /* RAM case */
5fafdf24 3283 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
84b7b8e7
FB
3284 (addr & ~TARGET_PAGE_MASK);
3285 val = ldq_p(ptr);
3286 }
3287 return val;
3288}
3289
aab33094
FB
3290/* XXX: optimize */
3291uint32_t ldub_phys(target_phys_addr_t addr)
3292{
3293 uint8_t val;
3294 cpu_physical_memory_read(addr, &val, 1);
3295 return val;
3296}
3297
3298/* XXX: optimize */
3299uint32_t lduw_phys(target_phys_addr_t addr)
3300{
3301 uint16_t val;
3302 cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3303 return tswap16(val);
3304}
3305
8df1cd07
FB
3306/* warning: addr must be aligned. The ram page is not marked as dirty
3307 and the code inside is not invalidated. It is useful if the dirty
3308 bits are used to track modified PTEs */
3309void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3310{
3311 int io_index;
3312 uint8_t *ptr;
3313 unsigned long pd;
3314 PhysPageDesc *p;
3315
3316 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3317 if (!p) {
3318 pd = IO_MEM_UNASSIGNED;
3319 } else {
3320 pd = p->phys_offset;
3321 }
3b46e624 3322
3a7d929e 3323 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3324 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3325 if (p)
3326 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3327 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3328 } else {
74576198
AL
3329 unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3330 ptr = phys_ram_base + addr1;
8df1cd07 3331 stl_p(ptr, val);
74576198
AL
3332
3333 if (unlikely(in_migration)) {
3334 if (!cpu_physical_memory_is_dirty(addr1)) {
3335 /* invalidate code */
3336 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3337 /* set dirty bit */
3338 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3339 (0xff & ~CODE_DIRTY_FLAG);
3340 }
3341 }
8df1cd07
FB
3342 }
3343}
3344
bc98a7ef
JM
3345void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3346{
3347 int io_index;
3348 uint8_t *ptr;
3349 unsigned long pd;
3350 PhysPageDesc *p;
3351
3352 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3353 if (!p) {
3354 pd = IO_MEM_UNASSIGNED;
3355 } else {
3356 pd = p->phys_offset;
3357 }
3b46e624 3358
bc98a7ef
JM
3359 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3360 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3361 if (p)
3362 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bc98a7ef
JM
3363#ifdef TARGET_WORDS_BIGENDIAN
3364 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3365 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3366#else
3367 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3368 io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3369#endif
3370 } else {
5fafdf24 3371 ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
bc98a7ef
JM
3372 (addr & ~TARGET_PAGE_MASK);
3373 stq_p(ptr, val);
3374 }
3375}
3376
8df1cd07 3377/* warning: addr must be aligned */
8df1cd07
FB
3378void stl_phys(target_phys_addr_t addr, uint32_t val)
3379{
3380 int io_index;
3381 uint8_t *ptr;
3382 unsigned long pd;
3383 PhysPageDesc *p;
3384
3385 p = phys_page_find(addr >> TARGET_PAGE_BITS);
3386 if (!p) {
3387 pd = IO_MEM_UNASSIGNED;
3388 } else {
3389 pd = p->phys_offset;
3390 }
3b46e624 3391
3a7d929e 3392 if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
8df1cd07 3393 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
8da3ff18
PB
3394 if (p)
3395 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
8df1cd07
FB
3396 io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3397 } else {
3398 unsigned long addr1;
3399 addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3400 /* RAM case */
3401 ptr = phys_ram_base + addr1;
3402 stl_p(ptr, val);
3a7d929e
FB
3403 if (!cpu_physical_memory_is_dirty(addr1)) {
3404 /* invalidate code */
3405 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3406 /* set dirty bit */
f23db169
FB
3407 phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3408 (0xff & ~CODE_DIRTY_FLAG);
3a7d929e 3409 }
8df1cd07
FB
3410 }
3411}
3412
aab33094
FB
3413/* XXX: optimize */
3414void stb_phys(target_phys_addr_t addr, uint32_t val)
3415{
3416 uint8_t v = val;
3417 cpu_physical_memory_write(addr, &v, 1);
3418}
3419
3420/* XXX: optimize */
3421void stw_phys(target_phys_addr_t addr, uint32_t val)
3422{
3423 uint16_t v = tswap16(val);
3424 cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3425}
3426
3427/* XXX: optimize */
3428void stq_phys(target_phys_addr_t addr, uint64_t val)
3429{
3430 val = tswap64(val);
3431 cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3432}
3433
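/* Sketch (an assumption): target MMU code that sets accessed/dirty bits in a
   guest page table entry uses the _notdirty store, so updating the PTE does
   not disturb the dirty tracking of the page that holds it.  The bit value is
   hypothetical.  Kept under #if 0. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
#endif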
13eb76e0
FB
3434#endif
3435
3436/* virtual memory access for debug */
5fafdf24 3437int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
b448f2f3 3438 uint8_t *buf, int len, int is_write)
13eb76e0
FB
3439{
3440 int l;
9b3c35e0
JM
3441 target_phys_addr_t phys_addr;
3442 target_ulong page;
13eb76e0
FB
3443
3444 while (len > 0) {
3445 page = addr & TARGET_PAGE_MASK;
3446 phys_addr = cpu_get_phys_page_debug(env, page);
3447 /* if no physical page mapped, return an error */
3448 if (phys_addr == -1)
3449 return -1;
3450 l = (page + TARGET_PAGE_SIZE) - addr;
3451 if (l > len)
3452 l = len;
5fafdf24 3453 cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
b448f2f3 3454 buf, l, is_write);
13eb76e0
FB
3455 len -= l;
3456 buf += l;
3457 addr += l;
3458 }
3459 return 0;
3460}
3461
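/* Usage sketch, not from the original source: cpu_memory_rw_debug translates
   one page at a time with cpu_get_phys_page_debug(), so callers may pass
   arbitrary virtual ranges.  A hypothetical gdb-stub style read of a single
   32-bit word of guest memory at a virtual address: */
#if 0
static int example_read_guest_word(CPUState *env, target_ulong vaddr,
                                   uint32_t *value)
{
    /* final argument 0 = read; returns -1 if any page is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)value, sizeof(*value), 0);
}
#endif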
2e70f6ef
PB
3462/* in deterministic execution mode, instructions doing device I/Os
3463 must be at the end of the TB */
3464void cpu_io_recompile(CPUState *env, void *retaddr)
3465{
3466 TranslationBlock *tb;
3467 uint32_t n, cflags;
3468 target_ulong pc, cs_base;
3469 uint64_t flags;
3470
3471 tb = tb_find_pc((unsigned long)retaddr);
3472 if (!tb) {
3473 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
3474 retaddr);
3475 }
3476 n = env->icount_decr.u16.low + tb->icount;
3477 cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3478 /* Calculate how many instructions had been executed before the fault
bf20dc07 3479 occurred. */
2e70f6ef
PB
3480 n = n - env->icount_decr.u16.low;
3481 /* Generate a new TB ending on the I/O insn. */
3482 n++;
3483 /* On MIPS and SH, delay slot instructions can only be restarted if
3484 they were already the first instruction in the TB. If this is not
bf20dc07 3485 the first instruction in a TB then re-execute the preceding
2e70f6ef
PB
3486 branch. */
3487#if defined(TARGET_MIPS)
3488 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3489 env->active_tc.PC -= 4;
3490 env->icount_decr.u16.low++;
3491 env->hflags &= ~MIPS_HFLAG_BMASK;
3492 }
3493#elif defined(TARGET_SH4)
3494 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3495 && n > 1) {
3496 env->pc -= 2;
3497 env->icount_decr.u16.low++;
3498 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3499 }
3500#endif
3501 /* This should never happen. */
3502 if (n > CF_COUNT_MASK)
3503 cpu_abort(env, "TB too big during recompile");
3504
3505 cflags = n | CF_LAST_IO;
3506 pc = tb->pc;
3507 cs_base = tb->cs_base;
3508 flags = tb->flags;
3509 tb_phys_invalidate(tb, -1);
3510 /* FIXME: In theory this could raise an exception. In practice
3511 we have already translated the block once so it's probably ok. */
3512 tb_gen_code(env, pc, cs_base, flags, cflags);
bf20dc07 3513 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
2e70f6ef
PB
3514 the first in the TB) then we end up generating a whole new TB and
3515 repeating the fault, which is horribly inefficient.
3516 Better would be to execute just this insn uncached, or generate a
3517 second new TB. */
3518 cpu_resume_from_signal(env, NULL);
3519}
3520
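/* Worked example, not from the original source: assume a TB of 10 guest
   instructions (tb->icount == 10) whose 4th instruction touches a device
   while icount is active.  At the trap, n first recovers the instruction
   budget the TB started with; after cpu_restore_state() rewinds icount to the
   faulting instruction, n - icount_decr.u16.low == 3 completed instructions,
   and n++ makes the replacement TB exactly 4 instructions long.  CF_LAST_IO
   then forces the I/O instruction to be the final one of that TB, so the
   access is re-executed at a deterministic instruction count. */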
e3db7226
FB
3521void dump_exec_info(FILE *f,
3522 int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3523{
3524 int i, target_code_size, max_target_code_size;
3525 int direct_jmp_count, direct_jmp2_count, cross_page;
3526 TranslationBlock *tb;
3b46e624 3527
e3db7226
FB
3528 target_code_size = 0;
3529 max_target_code_size = 0;
3530 cross_page = 0;
3531 direct_jmp_count = 0;
3532 direct_jmp2_count = 0;
3533 for(i = 0; i < nb_tbs; i++) {
3534 tb = &tbs[i];
3535 target_code_size += tb->size;
3536 if (tb->size > max_target_code_size)
3537 max_target_code_size = tb->size;
3538 if (tb->page_addr[1] != -1)
3539 cross_page++;
3540 if (tb->tb_next_offset[0] != 0xffff) {
3541 direct_jmp_count++;
3542 if (tb->tb_next_offset[1] != 0xffff) {
3543 direct_jmp2_count++;
3544 }
3545 }
3546 }
3547 /* XXX: avoid using doubles? */
57fec1fe 3548 cpu_fprintf(f, "Translation buffer state:\n");
26a5f13b
FB
3549 cpu_fprintf(f, "gen code size %ld/%ld\n",
3550 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3551 cpu_fprintf(f, "TB count %d/%d\n",
3552 nb_tbs, code_gen_max_blocks);
5fafdf24 3553 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
e3db7226
FB
3554 nb_tbs ? target_code_size / nb_tbs : 0,
3555 max_target_code_size);
5fafdf24 3556 cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
e3db7226
FB
3557 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3558 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
5fafdf24
TS
3559 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3560 cross_page,
e3db7226
FB
3561 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3562 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
5fafdf24 3563 direct_jmp_count,
e3db7226
FB
3564 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3565 direct_jmp2_count,
3566 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
57fec1fe 3567 cpu_fprintf(f, "\nStatistics:\n");
e3db7226
FB
3568 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
3569 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3570 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
b67d9a52 3571 tcg_dump_info(f, cpu_fprintf);
e3db7226
FB
3572}
3573
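/* Usage sketch, not from the original source: the cpu_fprintf callback has
   the same shape as fprintf, so the statistics can be sent to any stdio
   stream from a hypothetical debugging hook: */
#if 0
static void example_dump_exec_info_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif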
5fafdf24 3574#if !defined(CONFIG_USER_ONLY)
61382a50
FB
3575
3576#define MMUSUFFIX _cmmu
3577#define GETPC() NULL
3578#define env cpu_single_env
b769d8fe 3579#define SOFTMMU_CODE_ACCESS
61382a50
FB
3580
3581#define SHIFT 0
3582#include "softmmu_template.h"
3583
3584#define SHIFT 1
3585#include "softmmu_template.h"
3586
3587#define SHIFT 2
3588#include "softmmu_template.h"
3589
3590#define SHIFT 3
3591#include "softmmu_template.h"
3592
3593#undef env
3594
3595#endif
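/* Note on the block above, not from the original source: the repeated
   inclusion of "softmmu_template.h" is a textual template -- each pass sees a
   different SHIFT (0..3 selecting 1/2/4/8-byte accesses), SOFTMMU_CODE_ACCESS
   selects the code-fetch variants, GETPC() is stubbed to NULL because there
   is no generated-code caller to restart, and "env" is mapped to
   cpu_single_env so the template can resolve the current CPU in this file.
   The same multiple-inclusion idiom in (hypothetical) miniature:

       -- mini_template.h --
       #define DATA_SIZE (1 << SHIFT)
       ...emit one accessor of DATA_SIZE bytes...
       #undef DATA_SIZE
       #undef SHIFT

       -- user --
       #define SHIFT 0
       #include "mini_template.h"    (1-byte accessor)
       #define SHIFT 2
       #include "mini_template.h"    (4-byte accessor)
*/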